author      Mel Gorman <mgorman@suse.de>                    2011-07-25 20:12:30 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2011-07-25 23:57:10 -0400
commit      76d3fbf8fbf6cc78ceb63549e0e0c5bc8a88f838
tree        cebd9474333db6965fe6af7cc3f652d3091b658b /mm/page_alloc.c
parent      cd38b115d5ad79b0100ac6daa103c4fe2c50a913
mm: page allocator: reconsider zones for allocation after direct reclaim
With zone_reclaim_mode enabled, it is possible for zones to be marked full in the
zonelist_cache so that they are skipped in future allocation attempts. If the
process then enters direct reclaim, the ZLC may still consider those zones full
even after pages have been reclaimed from them. Reconsider all zones for
allocation if direct reclaim returns successfully.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
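To make the mechanism concrete before reading the diff, here is a minimal,
self-contained C sketch of the idea the patch relies on: a per-zonelist bitmap
remembers which zones recently looked full so the allocator can skip them, and
the whole bitmap is cleared once direct reclaim makes progress. The names used
here (model_zlc, mark_zone_full, zone_worth_trying, clear_zones_full) and the
zone count are illustrative stand-ins, not the kernel's real zonelist_cache API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_MODEL_ZONES 4

struct model_zlc {
        /* one flag per zone: set means "recently looked full, skip it" */
        unsigned char fullzones[NR_MODEL_ZONES];
};

static void mark_zone_full(struct model_zlc *zlc, int zone)
{
        zlc->fullzones[zone] = 1;
}

static bool zone_worth_trying(const struct model_zlc *zlc, int zone)
{
        return !zlc->fullzones[zone];
}

/* What this commit adds: forget every "full" hint after reclaim progress */
static void clear_zones_full(struct model_zlc *zlc)
{
        memset(zlc->fullzones, 0, sizeof(zlc->fullzones));
}

int main(void)
{
        struct model_zlc zlc = { { 0 } };

        /* allocation attempts found zones 1 and 2 full; cache that fact */
        mark_zone_full(&zlc, 1);
        mark_zone_full(&zlc, 2);

        for (int z = 0; z < NR_MODEL_ZONES; z++)
                printf("zone %d worth trying before reclaim: %d\n",
                       z, zone_worth_trying(&zlc, z));

        /* direct reclaim succeeded: those zones may have free pages again */
        clear_zones_full(&zlc);

        for (int z = 0; z < NR_MODEL_ZONES; z++)
                printf("zone %d worth trying after reclaim:  %d\n",
                       z, zone_worth_trying(&zlc, z));

        return 0;
}

The kernel change below works on zonelist->zlcache_ptr and a fixed-size bitmap
via bitmap_zero(), and only runs when NUMA_BUILD is set, but the lifecycle is
the same: mark full, skip, clear after successful reclaim.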
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   23
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 830a465958de..094472377d81 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1616,6 +1616,21 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 	set_bit(i, zlc->fullzones);
 }
 
+/*
+ * clear all zones full, called after direct reclaim makes progress so that
+ * a zone that was recently full is not skipped over for up to a second
+ */
+static void zlc_clear_zones_full(struct zonelist *zonelist)
+{
+	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
+
+	zlc = zonelist->zlcache_ptr;
+	if (!zlc)
+		return;
+
+	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+}
+
 #else	/* CONFIG_NUMA */
 
 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
@@ -1632,6 +1647,10 @@ static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 {
 }
+
+static void zlc_clear_zones_full(struct zonelist *zonelist)
+{
+}
 #endif	/* CONFIG_NUMA */
 
 /*
@@ -1963,6 +1982,10 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	if (unlikely(!(*did_some_progress)))
 		return NULL;
 
+	/* After successful reclaim, reconsider all zones for allocation */
+	if (NUMA_BUILD)
+		zlc_clear_zones_full(zonelist);
+
 retry:
 	page = get_page_from_freelist(gfp_mask, nodemask, order,
 					zonelist, high_zoneidx,