path: root/mm/oom_kill.c
author	Mel Gorman <mel@csn.ul.ie>	2008-04-28 05:12:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 11:58:18 -0400
commit	dd1a239f6f2d4d3eedd318583ec319aa145b324c (patch)
tree	aff4224c96b5e2e67588c3946858a724863eeaf9 /mm/oom_kill.c
parent	54a6eb5c4765aa573a030ceeba2c14e3d2ea5706 (diff)
mm: have zonelist contains structs with both a zone pointer and zone_idx
Filtering zonelists requires very frequent use of zone_idx(). This is costly as it involves a lookup of another structure and a subtraction operation. As the zone_idx is often required, it should be quickly accessible. The node idx could also be stored here if it was found that accessing zone->node is significant, which may be the case on workloads where nodemasks are heavily used.

This patch introduces a struct zoneref to store a zone pointer and a zone index. The zonelist then consists of an array of these struct zonerefs which are looked up as necessary. Helpers are given for accessing the zone index as well as the node index.

[kamezawa.hiroyu@jp.fujitsu.com: Suggested struct zoneref instead of embedding information in pointers]
[hugh@veritas.com: mm-have-zonelist: fix memcg ooms]
[hugh@veritas.com: just return do_try_to_free_pages]
[hugh@veritas.com: do_try_to_free_pages gfp_mask redundant]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Christoph Lameter <clameter@sgi.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
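As a rough illustration of the layout described above (a sketch in the spirit of the patch; the exact field and helper names here are assumptions, not quoted from the diff below), the change amounts to caching the zone index next to the zone pointer so filtering never has to recompute zone_idx():

/* Illustration only: a zoneref pairs a zone pointer with its cached index. */
struct zoneref {
	struct zone *zone;	/* the zone itself */
	int zone_idx;		/* cached zone_idx(zone), avoiding the extra lookup */
};

/* Accessor-style helpers in the spirit of the ones the patch introduces. */
static inline struct zone *zonelist_zone(struct zoneref *zref)
{
	return zref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zref)
{
	return zref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zref)
{
	return zone_to_nid(zref->zone);	/* node index derived from the zone */
}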
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--	mm/oom_kill.c	45
1 file changed, 22 insertions(+), 23 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 2c93502cfcb4..e41504aa5da9 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -176,7 +176,7 @@ static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 {
 #ifdef CONFIG_NUMA
 	struct zone *zone;
-	struct zone **z;
+	struct zoneref *z;
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	nodemask_t nodes = node_states[N_HIGH_MEMORY];
 
@@ -462,29 +462,29 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
  * if a parallel OOM killing is already taking place that includes a zone in
  * the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
  */
-int try_set_zone_oom(struct zonelist *zonelist)
+int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 {
-	struct zone **z;
+	struct zoneref *z;
+	struct zone *zone;
 	int ret = 1;
 
-	z = zonelist->zones;
-
 	spin_lock(&zone_scan_mutex);
-	do {
-		if (zone_is_oom_locked(*z)) {
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+		if (zone_is_oom_locked(zone)) {
 			ret = 0;
 			goto out;
 		}
-	} while (*(++z) != NULL);
+	}
+
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+		/*
+		 * Lock each zone in the zonelist under zone_scan_mutex so a
+		 * parallel invocation of try_set_zone_oom() doesn't succeed
+		 * when it shouldn't.
+		 */
+		zone_set_flag(zone, ZONE_OOM_LOCKED);
+	}
 
-	/*
-	 * Lock each zone in the zonelist under zone_scan_mutex so a parallel
-	 * invocation of try_set_zone_oom() doesn't succeed when it shouldn't.
-	 */
-	z = zonelist->zones;
-	do {
-		zone_set_flag(*z, ZONE_OOM_LOCKED);
-	} while (*(++z) != NULL);
 out:
 	spin_unlock(&zone_scan_mutex);
 	return ret;
@@ -495,16 +495,15 @@ out:
  * allocation attempts with zonelists containing them may now recall the OOM
  * killer, if necessary.
  */
-void clear_zonelist_oom(struct zonelist *zonelist)
+void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 {
-	struct zone **z;
-
-	z = zonelist->zones;
+	struct zoneref *z;
+	struct zone *zone;
 
 	spin_lock(&zone_scan_mutex);
-	do {
-		zone_clear_flag(*z, ZONE_OOM_LOCKED);
-	} while (*(++z) != NULL);
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+		zone_clear_flag(zone, ZONE_OOM_LOCKED);
+	}
 	spin_unlock(&zone_scan_mutex);
 }
 
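For orientation, here is a simplified sketch of what iterating the new zoneref-based zonelist amounts to. This is not the kernel's for_each_zone_zonelist() macro used in the hunks above; the "_zonerefs" field name and the NULL-zone termination are assumptions based on this patch series, and the function is purely illustrative:

/*
 * Illustration only. The real iteration is done by for_each_zone_zonelist();
 * this sketch just shows why the cached zone_idx makes filtering cheap:
 * no zone_idx() recomputation is needed per zonelist entry.
 */
static void visit_zones_example(struct zonelist *zonelist,
				enum zone_type high_zoneidx)
{
	struct zoneref *z;
	struct zone *zone;

	for (z = zonelist->_zonerefs; z->zone != NULL; z++) {
		if (zonelist_zone_idx(z) > high_zoneidx)
			continue;	/* skip zones above the allowed index */
		zone = zonelist_zone(z);
		/* ... operate on zone, e.g. test or set ZONE_OOM_LOCKED ... */
	}
}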