diff options
| -rw-r--r-- | mm/page_alloc.c | 18 |
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8add7daf98b0..fe753ecf2aa5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
| @@ -2681,6 +2681,7 @@ static void __meminit zone_init_free_lists(struct zone *zone) | |||
| 2681 | 2681 | ||
| 2682 | static int zone_batchsize(struct zone *zone) | 2682 | static int zone_batchsize(struct zone *zone) |
| 2683 | { | 2683 | { |
| 2684 | #ifdef CONFIG_MMU | ||
| 2684 | int batch; | 2685 | int batch; |
| 2685 | 2686 | ||
| 2686 | /* | 2687 | /* |
| @@ -2709,6 +2710,23 @@ static int zone_batchsize(struct zone *zone) | |||
| 2709 | batch = rounddown_pow_of_two(batch + batch/2) - 1; | 2710 | batch = rounddown_pow_of_two(batch + batch/2) - 1; |
| 2710 | 2711 | ||
| 2711 | return batch; | 2712 | return batch; |
| 2713 | |||
| 2714 | #else | ||
| 2715 | /* The deferral and batching of frees should be suppressed under NOMMU | ||
| 2716 | * conditions. | ||
| 2717 | * | ||
| 2718 | * The problem is that NOMMU needs to be able to allocate large chunks | ||
| 2719 | * of contiguous memory as there's no hardware page translation to | ||
| 2720 | * assemble apparent contiguous memory from discontiguous pages. | ||
| 2721 | * | ||
| 2722 | * Queueing large contiguous runs of pages for batching, however, | ||
| 2723 | * causes the pages to actually be freed in smaller chunks. As there | ||
| 2724 | * can be a significant delay between the individual batches being | ||
| 2725 | * recycled, this leads to the once large chunks of space being | ||
| 2726 | * fragmented and becoming unavailable for high-order allocations. | ||
| 2727 | */ | ||
| 2728 | return 0; | ||
| 2729 | #endif | ||
| 2712 | } | 2730 | } |
| 2713 | 2731 | ||
| 2714 | static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) | 2732 | static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) |
