author		Mel Gorman <mel@csn.ul.ie>	2009-06-16 18:32:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:36 -0400
commit		72807a74c0172376bba6b5b27702c9f702b526e9
tree		d0f4a4a835754286ec9e2b940e950b2b68a4f61b /mm/page_alloc.c
parent		092cead6175bb1b3d3078a34ba71c939d526c70b
page allocator: sanity check order in the page allocator slow path
Callers may speculatively try different allocators in order of preference
when allocating a buffer of a given size. The order needed to satisfy
such a request may be larger than the page allocator can normally handle.
While the allocator mostly does the right thing, it should not enter
direct reclaim or wake up kswapd with a bogus order. This patch
sanity-checks the order in the slow path and returns NULL if it is too
large.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
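
The caller pattern the changelog describes looks roughly like the sketch
below. This is a hypothetical illustration, not code from this patch: a
subsystem computes a page order from a requested size with get_order()
and prefers physically contiguous pages, falling back to vmalloc() when
the request is too big. For a large enough size, get_order(size) can
reach or exceed MAX_ORDER, and the caller depends on alloc_pages()
failing cleanly rather than triggering reclaim for an impossible order.

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/*
	 * Hypothetical caller, for illustration only; alloc_big_buffer()
	 * is not a real kernel function. For large 'size' the computed
	 * order may be >= MAX_ORDER and alloc_pages() must fail cleanly.
	 */
	static void *alloc_big_buffer(size_t size)
	{
		struct page *page;

		/* Preferred: physically contiguous pages from the buddy allocator. */
		page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, get_order(size));
		if (page)
			return page_address(page);

		/* Fallback: virtually contiguous memory. */
		return vmalloc(size);
	}

A real caller would also have to record which allocator succeeded so the
buffer can later be released with __free_pages() or vfree() as
appropriate.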
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5dac5d8cb148..85759cdd6973 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1446,9 +1446,6 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 	int zlc_active = 0;		/* set if using zonelist_cache */
 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
 
-	if (WARN_ON_ONCE(order >= MAX_ORDER))
-		return NULL;
-
 	classzone_idx = zone_idx(preferred_zone);
 zonelist_scan:
 	/*
@@ -1707,6 +1704,15 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct task_struct *p = current;
 
 	/*
+	 * In the slowpath, we sanity check order to avoid ever trying to
+	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
+	 * be using allocators in order of preference for an area that is
+	 * too large.
+	 */
+	if (WARN_ON_ONCE(order >= MAX_ORDER))
+		return NULL;
+
+	/*
 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
 	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
 	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
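
For context, the allocator entry point of this era looks roughly as
follows; this is a simplified paraphrase of __alloc_pages_nodemask(),
with the setup of preferred_zone, high_zoneidx and migratetype elided.
Because the buddy allocator keeps no free lists at or above MAX_ORDER,
the fast path cannot satisfy an oversized request, so every such
allocation falls through to __alloc_pages_slowpath(), where the
relocated check returns NULL before direct reclaim runs or kswapd is
woken.

	/*
	 * Simplified paraphrase of the 2.6.31-era entry point, not the
	 * verbatim source. preferred_zone, high_zoneidx and migratetype
	 * are computed earlier in the real function.
	 */
	struct page *
	__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
			       struct zonelist *zonelist, nodemask_t *nodemask)
	{
		struct page *page;

		/* Fast path: take a page straight from the free lists. */
		page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, nodemask,
					      order, zonelist, high_zoneidx,
					      ALLOC_WMARK_LOW | ALLOC_CPUSET,
					      preferred_zone, migratetype);
		if (unlikely(!page))
			/* Slow path: now the only place order is sanity checked. */
			page = __alloc_pages_slowpath(gfp_mask, order, zonelist,
						      high_zoneidx, nodemask,
						      preferred_zone, migratetype);

		return page;
	}

Keeping the check out of get_page_from_freelist() avoids a branch in the
fast path for a condition that can only matter once the fast path has
already failed.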