aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorDavid Rientjes <rientjes@google.com>2014-08-06 19:07:52 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-06 21:01:21 -0400
commite972a070e2d3296cd2e2cc2fd0561ce89a1d5ebf (patch)
tree567ad846183dc3f226cddeb0d7c353fb640b047e /mm
parent8d060bf490930f305c4efc45724e861a268f4d2f (diff)
mm, oom: rename zonelist locking functions
try_set_zonelist_oom() and clear_zonelist_oom() are not named properly to imply that they require locking semantics to avoid out_of_memory() being reordered. zone_scan_lock is required for both functions to ensure that there is proper locking synchronization. Rename try_set_zonelist_oom() to oom_zonelist_trylock() and rename clear_zonelist_oom() to oom_zonelist_unlock() to imply there is proper locking semantics. At the same time, convert oom_zonelist_trylock() to return bool instead of int since only success and failure are tested. Signed-off-by: David Rientjes <rientjes@google.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/oom_kill.c30
-rw-r--r--mm/page_alloc.c6
2 files changed, 16 insertions, 20 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b0a1e1ff0353..d33aca1552ad 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -559,28 +559,25 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
559 * if a parallel OOM killing is already taking place that includes a zone in
560 * the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
561 */
562int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) 562bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask)
563{ 563{
564 struct zoneref *z; 564 struct zoneref *z;
565 struct zone *zone; 565 struct zone *zone;
566 int ret = 1; 566 bool ret = true;
567 567
568 spin_lock(&zone_scan_lock); 568 spin_lock(&zone_scan_lock);
569 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { 569 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
570 if (zone_is_oom_locked(zone)) { 570 if (zone_is_oom_locked(zone)) {
571 ret = 0; 571 ret = false;
572 goto out; 572 goto out;
573 } 573 }
574 }
575 574
576 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { 575 /*
577 /* 576 * Lock each zone in the zonelist under zone_scan_lock so a parallel
578 * Lock each zone in the zonelist under zone_scan_lock so a 577 * call to oom_zonelist_trylock() doesn't succeed when it shouldn't.
579 * parallel invocation of try_set_zonelist_oom() doesn't succeed 578 */
580 * when it shouldn't. 579 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
581 */
582 zone_set_flag(zone, ZONE_OOM_LOCKED); 580 zone_set_flag(zone, ZONE_OOM_LOCKED);
583 }
584 581
585out: 582out:
586 spin_unlock(&zone_scan_lock); 583 spin_unlock(&zone_scan_lock);
@@ -592,15 +589,14 @@ out:
592 * allocation attempts with zonelists containing them may now recall the OOM
593 * killer, if necessary.
594 */
595void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) 592void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask)
596{ 593{
597 struct zoneref *z; 594 struct zoneref *z;
598 struct zone *zone; 595 struct zone *zone;
599 596
600 spin_lock(&zone_scan_lock); 597 spin_lock(&zone_scan_lock);
601 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { 598 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
602 zone_clear_flag(zone, ZONE_OOM_LOCKED); 599 zone_clear_flag(zone, ZONE_OOM_LOCKED);
603 }
604 spin_unlock(&zone_scan_lock); 600 spin_unlock(&zone_scan_lock);
605} 601}
606 602
@@ -695,8 +691,8 @@ void pagefault_out_of_memory(void)
695 return; 691 return;
696 692
697 zonelist = node_zonelist(first_memory_node, GFP_KERNEL); 693 zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
698 if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) { 694 if (oom_zonelist_trylock(zonelist, GFP_KERNEL)) {
699 out_of_memory(NULL, 0, 0, NULL, false); 695 out_of_memory(NULL, 0, 0, NULL, false);
700 clear_zonelist_oom(zonelist, GFP_KERNEL); 696 oom_zonelist_unlock(zonelist, GFP_KERNEL);
701 } 697 }
702} 698}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fb9908148474..578236089ec1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2246,8 +2246,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2246{ 2246{
2247 struct page *page; 2247 struct page *page;
2248 2248
2249 /* Acquire the OOM killer lock for the zones in zonelist */ 2249 /* Acquire the per-zone oom lock for each zone */
2250 if (!try_set_zonelist_oom(zonelist, gfp_mask)) { 2250 if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
2251 schedule_timeout_uninterruptible(1); 2251 schedule_timeout_uninterruptible(1);
2252 return NULL; 2252 return NULL;
2253 } 2253 }
@@ -2285,7 +2285,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2285 out_of_memory(zonelist, gfp_mask, order, nodemask, false); 2285 out_of_memory(zonelist, gfp_mask, order, nodemask, false);
2286 2286
2287out: 2287out:
2288 clear_zonelist_oom(zonelist, gfp_mask); 2288 oom_zonelist_unlock(zonelist, gfp_mask);
2289 return page; 2289 return page;
2290} 2290}
2291 2291