author:    David Howells <dhowells@redhat.com>    2009-05-06 19:03:03 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>    2009-05-06 19:36:10 -0400
commit:    3a6be87fd1e5cdbbc3b6a14d02a3efa9ecba1d3f (patch)
tree:      bb58ea44178d8a16861d445df8fa3623ddf02d38 /mm/page_alloc.c
parent:    9155203a5de94278525647b16733f0c315f3b786 (diff)
nommu: clamp zone_batchsize() to 0 under NOMMU conditions
Clamp zone_batchsize() to 0 under NOMMU conditions to stop
free_hot_cold_page() from queueing and batching frees.

The problem is that under NOMMU conditions it is really important to be
able to allocate large contiguous chunks of memory, but when munmap() or
exit_mmap() releases big stretches of memory, return of these to the buddy
allocator can be deferred, and when it does finally happen, it can be in
small chunks.

Whilst the fragmentation this incurs isn't so much of a problem under MMU
conditions as userspace VM is glued together from individual pages with
the aid of the MMU, it is a real problem if there isn't an MMU.

By clamping the page freeing queue size to 0, pages are returned to the
allocator immediately, and the buddy detector is more likely to be able to
glue them together into large chunks immediately, and fragmentation is
less likely to occur.

By disabling batching of frees, and by turning off the trimming of excess
space during boot, Coldfire can manage to boot.

Reported-by: Lanttor Guo <lanttor.guo@freescale.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Lanttor Guo <lanttor.guo@freescale.com>
Cc: Greg Ungerer <gerg@snapgear.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
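To see the mechanism in miniature, consider the following hypothetical,
self-contained userspace sketch; none of these identifiers exist in the
kernel, and a real per-CPU list also drains in pcp->batch-sized chunks
rather than all at once. Freed page numbers sit on a deferred queue and
are only pushed to a "buddy" bitmap once the batch threshold is
exceeded, so with a non-zero batch the tail of a contiguous run can
linger on the queue, invisible to the coalescer:

#include <stdio.h>
#include <stdbool.h>

#define NPAGES 16

static bool buddy_free[NPAGES];	/* pages returned to the buddy allocator */
static int queue[NPAGES];	/* deferred frees (the "pcp" list) */
static int queued;

/* Push every queued page back to the buddy bitmap. */
static void flush_queue(void)
{
	while (queued > 0)
		buddy_free[queue[--queued]] = true;
}

/* Longest contiguous free run currently visible to the buddy. */
static int largest_free_run(void)
{
	int best = 0, run = 0;

	for (int i = 0; i < NPAGES; i++) {
		run = buddy_free[i] ? run + 1 : 0;
		if (run > best)
			best = run;
	}
	return best;
}

/* Free one page, honouring the batching threshold (0 = immediate). */
static void free_page_batched(int page, int batch)
{
	queue[queued++] = page;
	if (queued > batch)
		flush_queue();
}

int main(void)
{
	for (int batch = 0; batch <= 4; batch += 4) {
		for (int i = 0; i < NPAGES; i++)
			buddy_free[i] = false;
		queued = 0;

		/* Release a 12-page contiguous region one page at a
		 * time, as munmap() or exit_mmap() would. */
		for (int i = 0; i < 12; i++)
			free_page_batched(i, batch);

		printf("batch=%d: largest free run seen by buddy = %d of 12\n",
		       batch, largest_free_run());
	}
	return 0;
}

With batch=0 the buddy sees the full 12-page run; with batch=4 the last
two pages are still parked on the queue, so the largest visible run is
only 10 pages and a 12-page (high-order) allocation would fail.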
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8add7daf98b0..fe753ecf2aa5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2681,6 +2681,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 
 static int zone_batchsize(struct zone *zone)
 {
+#ifdef CONFIG_MMU
 	int batch;
 
 	/*
@@ -2709,6 +2710,23 @@ static int zone_batchsize(struct zone *zone)
 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
 
 	return batch;
+
+#else
+	/* The deferral and batching of frees should be suppressed under NOMMU
+	 * conditions.
+	 *
+	 * The problem is that NOMMU needs to be able to allocate large chunks
+	 * of contiguous memory as there's no hardware page translation to
+	 * assemble apparent contiguous memory from discontiguous pages.
+	 *
+	 * Queueing large contiguous runs of pages for batching, however,
+	 * causes the pages to actually be freed in smaller chunks.  As there
+	 * can be a significant delay between the individual batches being
+	 * recycled, this leads to the once large chunks of space being
+	 * fragmented and becoming unavailable for high-order allocations.
+	 */
+	return 0;
+#endif
 }
 
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
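How the clamped value takes effect: zone_batchsize()'s result is passed to
setup_pageset(), visible in the trailing context above. At this point in
the kernel's history that function derived the per-CPU thresholds from it,
roughly pcp->high = 6 * batch and pcp->batch = max(1UL, batch), so a
clamped batch of 0 leaves pcp->high at 0 and free_hot_cold_page() pushes
each freed page straight through to the buddy allocator instead of parking
it on the per-CPU list.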