aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorMel Gorman <mel@csn.ul.ie>2010-05-24 17:32:30 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-05-25 11:06:59 -0400
commit56de7263fcf3eb10c8dcdf8d59a9cec831795f3f (patch)
tree164637c0b678e20adfdcec4129563d9234faf405 /mm/page_alloc.c
parented4a6d7f0676db50b5023cc01f6cda82a2f2a307 (diff)
mm: compaction: direct compact when a high-order allocation fails
Ordinarily when a high-order allocation fails, direct reclaim is entered to free pages to satisfy the allocation. With this patch, it is determined if an allocation failed due to external fragmentation instead of low memory and if so, the calling process will compact until a suitable page is freed. Compaction by moving pages in memory is considerably cheaper than paging out to disk and works where there are locked pages or no swap. If compaction fails to free a page of a suitable size, then reclaim will still occur. Direct compaction returns as soon as possible. As each block is compacted, it is checked if a suitable page has been freed and if so, it returns. [akpm@linux-foundation.org: Fix build errors] [aarcange@redhat.com: fix count_vm_event preempt in memory compaction direct reclaim] Signed-off-by: Mel Gorman <mel@csn.ul.ie> Acked-by: Rik van Riel <riel@redhat.com> Reviewed-by: Minchan Kim <minchan.kim@gmail.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Christoph Lameter <cl@linux-foundation.org> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c63
1 file changed, 63 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c54376a09f30..cd88a860f088 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -49,6 +49,7 @@
49#include <linux/debugobjects.h> 49#include <linux/debugobjects.h>
50#include <linux/kmemleak.h> 50#include <linux/kmemleak.h>
51#include <linux/memory.h> 51#include <linux/memory.h>
52#include <linux/compaction.h>
52#include <trace/events/kmem.h> 53#include <trace/events/kmem.h>
53#include <linux/ftrace_event.h> 54#include <linux/ftrace_event.h>
54 55
@@ -1758,6 +1759,59 @@ out:
1758 return page; 1759 return page;
1759} 1760}
1760 1761
1762#ifdef CONFIG_COMPACTION
1763/* Try memory compaction for high-order allocations before reclaim */
/*
 * __alloc_pages_direct_compact - run memory compaction, then retry the
 * allocation from the free lists.
 *
 * Only meaningful for order > 0 requests: an order-0 allocation cannot be
 * blocked by external fragmentation, so compaction is skipped for it.
 *
 * *did_some_progress is set to the try_to_compact_pages() result so the
 * caller can distinguish "compaction ran" from COMPACT_SKIPPED.
 *
 * Returns the newly allocated page on success, or NULL if compaction did
 * not free a suitable page — the caller then falls back to direct reclaim.
 */
1764static struct page *
1765__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1766	struct zonelist *zonelist, enum zone_type high_zoneidx,
1767	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1768	int migratetype, unsigned long *did_some_progress)
1769{
1770	struct page *page;
1771
	/* Order-0 allocations gain nothing from compaction. */
1772	if (!order)
1773		return NULL;
1774
1775	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
1776								nodemask);
	/* Compaction actually ran (was not skipped); retry the allocation. */
1777	if (*did_some_progress != COMPACT_SKIPPED) {
1778
1779		/* Page migration frees to the PCP lists but we want merging */
1780		drain_pages(get_cpu());
1781		put_cpu();
1782
1783		page = get_page_from_freelist(gfp_mask, nodemask,
1784				order, zonelist, high_zoneidx,
1785				alloc_flags, preferred_zone,
1786				migratetype);
1787		if (page) {
1788			count_vm_event(COMPACTSUCCESS);
1789			return page;
1790		}
1791
1792		/*
1793		 * A compaction run that fails to free a usable page is a
1794		 * bad sign: most likely pages exist, but not enough of
1795		 * them to satisfy the watermarks.
1796		 */
1797		count_vm_event(COMPACTFAIL);
1798
		/* Compaction is expensive; give other tasks a chance to run. */
1799		cond_resched();
1800	}
1801
1802	return NULL;
1803}
1804#else
/*
 * Compaction not built in: always fail so the caller proceeds directly to
 * direct reclaim. Note *did_some_progress is deliberately left untouched.
 */
1805static inline struct page *
1806__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1807	struct zonelist *zonelist, enum zone_type high_zoneidx,
1808	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1809	int migratetype, unsigned long *did_some_progress)
1810{
1811	return NULL;
1812}
1813#endif /* CONFIG_COMPACTION */
1814
1761/* The really slow allocator path where we enter direct reclaim */ 1815/* The really slow allocator path where we enter direct reclaim */
1762static inline struct page * 1816static inline struct page *
1763__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 1817__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
@@ -1944,6 +1998,15 @@ rebalance:
1944 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) 1998 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
1945 goto nopage; 1999 goto nopage;
1946 2000
2001 /* Try direct compaction */
2002 page = __alloc_pages_direct_compact(gfp_mask, order,
2003 zonelist, high_zoneidx,
2004 nodemask,
2005 alloc_flags, preferred_zone,
2006 migratetype, &did_some_progress);
2007 if (page)
2008 goto got_pg;
2009
1947 /* Try direct reclaim and then allocating */ 2010 /* Try direct reclaim and then allocating */
1948 page = __alloc_pages_direct_reclaim(gfp_mask, order, 2011 page = __alloc_pages_direct_reclaim(gfp_mask, order,
1949 zonelist, high_zoneidx, 2012 zonelist, high_zoneidx,