author     Mel Gorman <mel@csn.ul.ie>                      2010-09-09 19:38:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-09-09 21:57:25 -0400
commit     9ee493ce0a60bf42c0f8fd0b0fe91df5704a1cbf
tree       8dbdbf3d053281291ddc6ebe50d5d0afb5ce22d7 /mm/page_alloc.c
parent     aa45484031ddee09b06350ab8528bfe5b2c76d1c
mm: page allocator: drain per-cpu lists after direct reclaim allocation fails
When under significant memory pressure, a process enters direct reclaim
and immediately afterwards tries to allocate a page. If it fails and no
further progress is made, it's possible the system will go OOM. However,
on systems with large amounts of memory, it's possible that a significant
number of pages are on per-cpu lists and inaccessible to the calling
process. This leads to a process entering direct reclaim more often than
it should, increasing the pressure on the system and compounding the
problem.
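The effect is easiest to see in miniature. Below is a user-space toy model of the pinning problem (all names here are invented for illustration; this is not kernel code): pages freed onto a CPU-local list are invisible to an allocation that consults only the global free list.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS	4

/* Toy model: freed pages land on a per-cpu list first; a plain
 * allocation consults only the global free list. */
static int global_free_pages;
static int pcp_free_pages[NR_CPUS];

static void toy_free_page(int cpu)
{
	pcp_free_pages[cpu]++;		/* freed, but pinned to this CPU */
}

static bool toy_alloc_page(void)
{
	if (global_free_pages > 0) {
		global_free_pages--;
		return true;
	}
	return false;			/* cannot see the per-cpu pages */
}

int main(void)
{
	/* "Reclaim" made progress, but only onto other CPUs' lists... */
	toy_free_page(1);
	toy_free_page(2);

	/* ...so this allocation still fails despite pages being free. */
	printf("allocation %s\n", toy_alloc_page() ? "succeeded" : "failed");
	return 0;
}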
This patch notes that if direct reclaim is making progress but allocations
are still failing, the system is already under heavy pressure. In this
case, it drains the per-cpu lists and tries the allocation a second time
before continuing.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b2d21e06d45d..a8cfa9cc6e86 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1847,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
 	struct task_struct *p = current;
+	bool drained = false;
 
 	cond_resched();
 
@@ -1865,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
 	cond_resched();
 
-	if (order != 0)
-		drain_all_pages();
+	if (unlikely(!(*did_some_progress)))
+		return NULL;
 
-	if (likely(*did_some_progress))
-		page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+	page = get_page_from_freelist(gfp_mask, nodemask, order,
 					zonelist, high_zoneidx,
 					alloc_flags, preferred_zone,
 					migratetype);
+
+	/*
+	 * If an allocation failed after direct reclaim, it could be because
+	 * pages are pinned on the per-cpu lists. Drain them and try again
+	 */
+	if (!page && !drained) {
+		drain_all_pages();
+		drained = true;
+		goto retry;
+	}
+
 	return page;
 }
 
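Worth noting about the shape of the fix: the code it replaces called drain_all_pages() up front for every order > 0 attempt, whether or not the attempt would have failed, while the new code drains lazily, and the one-shot drained flag bounds the goto, so the drain runs at most once per direct-reclaim pass. Extending the toy model above with a drain shows the retry-once idiom (again, invented names, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS	4

static int global_free_pages;
static int pcp_free_pages[NR_CPUS];

static void toy_free_page(int cpu)
{
	pcp_free_pages[cpu]++;		/* freed, but pinned to this CPU */
}

static bool toy_alloc_page(void)
{
	if (global_free_pages > 0) {
		global_free_pages--;
		return true;
	}
	return false;
}

/* Analogue of drain_all_pages(): return every CPU's pages globally. */
static void toy_drain_all_pages(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		global_free_pages += pcp_free_pages[cpu];
		pcp_free_pages[cpu] = 0;
	}
}

int main(void)
{
	bool drained = false;
	bool ok;

	toy_free_page(1);		/* reclaim progress on other CPUs only */
	toy_free_page(2);

retry:
	ok = toy_alloc_page();
	if (!ok && !drained) {		/* failed: drain once, then retry */
		toy_drain_all_pages();
		drained = true;
		goto retry;
	}
	printf("allocation %s\n", ok ? "succeeded" : "failed");
	return 0;
}

Draining only after a failed attempt keeps the expensive all-CPU drain off the path of allocations that would have succeeded anyway.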