Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig        28
-rw-r--r--   mm/madvise.c       8
-rw-r--r--   mm/nommu.c         4
-rw-r--r--   mm/oom_kill.c     44
-rw-r--r--   mm/page_alloc.c   20
-rw-r--r--   mm/pdflush.c      31
-rw-r--r--   mm/vmalloc.c       1
7 files changed, 82 insertions, 54 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 57971d2ab848..c2b57d81e153 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -225,3 +225,31 @@ config HAVE_MLOCKED_PAGE_BIT
 
 config MMU_NOTIFIER
         bool
+
+config NOMMU_INITIAL_TRIM_EXCESS
+        int "Turn on mmap() excess space trimming before booting"
+        depends on !MMU
+        default 1
+        help
+          The NOMMU mmap() frequently needs to allocate large contiguous chunks
+          of memory on which to store mappings, but it can only ask the system
+          allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
+          more than it requires. To deal with this, mmap() is able to trim off
+          the excess and return it to the allocator.
+
+          If trimming is enabled, the excess is trimmed off and returned to the
+          system allocator, which can cause extra fragmentation, particularly
+          if there are a lot of transient processes.
+
+          If trimming is disabled, the excess is kept, but not used, which for
+          long-term mappings means that the space is wasted.
+
+          Trimming can be dynamically controlled through a sysctl option
+          (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
+          excess pages there must be before trimming should occur, or zero if
+          no trimming is to occur.
+
+          This option specifies the initial value of this option. The default
+          of 1 says that all excess pages should be trimmed.
+
+          See Documentation/nommu-mmap.txt for more information.
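
Note: the help text above points at /proc/sys/vm/nr_trim_pages as the runtime control. Below is a minimal user-space sketch of adjusting it (illustrative only, not part of the patch; it assumes a NOMMU kernel that exposes this sysctl).

/* Illustrative only, not part of the patch: adjust the trim threshold the
 * help text above describes by writing to /proc/sys/vm/nr_trim_pages.
 * "0" disables trimming, "1" (the default) trims all excess pages. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *val = (argc > 1) ? argv[1] : "1";
        int fd = open("/proc/sys/vm/nr_trim_pages", O_WRONLY);

        if (fd < 0) {
                perror("open /proc/sys/vm/nr_trim_pages");
                return 1;
        }
        if (write(fd, val, strlen(val)) < 0) {
                perror("write");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}
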
diff --git a/mm/madvise.c b/mm/madvise.c
index 36d6ea2b6340..b9ce574827c8 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -112,14 +112,6 @@ static long madvise_willneed(struct vm_area_struct * vma,
         if (!file)
                 return -EBADF;
 
-        /*
-         * Page cache readahead assumes page cache pages are order-0 which
-         * is not the case for hugetlbfs. Do not give a bad return value
-         * but ignore the advice.
-         */
-        if (vma->vm_flags & VM_HUGETLB)
-                return 0;
-
         if (file->f_mapping->a_ops->get_xip_mem) {
                 /* no bad return value, but ignore advice */
                 return 0;
diff --git a/mm/nommu.c b/mm/nommu.c
index 809998aa7b50..b571ef707428 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -66,7 +66,7 @@ struct percpu_counter vm_committed_as;
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
-int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
+int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
 int heap_stack_gap = 0;
 
 atomic_long_t mmap_pages_allocated;
@@ -515,8 +515,6 @@ static void add_nommu_region(struct vm_region *region)
 
         validate_nommu_regions();
 
-        BUG_ON(region->vm_start & ~PAGE_MASK);
-
         parent = NULL;
         p = &nommu_region_tree.rb_node;
         while (*p) {
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 2f3166e308d9..92bcf1db16b2 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -514,34 +514,32 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
  */
 static void __out_of_memory(gfp_t gfp_mask, int order)
 {
-        if (sysctl_oom_kill_allocating_task) {
-                oom_kill_process(current, gfp_mask, order, 0, NULL,
-                                "Out of memory (oom_kill_allocating_task)");
-
-        } else {
-                unsigned long points;
-                struct task_struct *p;
-
-retry:
-                /*
-                 * Rambo mode: Shoot down a process and hope it solves whatever
-                 * issues we may have.
-                 */
-                p = select_bad_process(&points, NULL);
+        struct task_struct *p;
+        unsigned long points;
 
-                if (PTR_ERR(p) == -1UL)
+        if (sysctl_oom_kill_allocating_task)
+                if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
+                                "Out of memory (oom_kill_allocating_task)"))
                         return;
+retry:
+        /*
+         * Rambo mode: Shoot down a process and hope it solves whatever
+         * issues we may have.
+         */
+        p = select_bad_process(&points, NULL);
 
-                /* Found nothing?!?! Either we hang forever, or we panic. */
-                if (!p) {
-                        read_unlock(&tasklist_lock);
-                        panic("Out of memory and no killable processes...\n");
-                }
+        if (PTR_ERR(p) == -1UL)
+                return;
 
-                if (oom_kill_process(p, gfp_mask, order, points, NULL,
-                                "Out of memory"))
-                        goto retry;
+        /* Found nothing?!?! Either we hang forever, or we panic. */
+        if (!p) {
+                read_unlock(&tasklist_lock);
+                panic("Out of memory and no killable processes...\n");
         }
+
+        if (oom_kill_process(p, gfp_mask, order, points, NULL,
+                        "Out of memory"))
+                goto retry;
 }
 
 /*
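
Note: the rework above means the sysctl_oom_kill_allocating_task path no longer returns unconditionally; it checks the return value of oom_kill_process() and falls through to normal victim selection when killing the allocating task fails. Below is a stand-alone control-flow sketch of that behaviour; the helpers are stand-ins, not the kernel's.

/* Control-flow sketch of the reworked __out_of_memory() above.  A non-zero
 * return from try_kill() models oom_kill_process() failing to kill the
 * chosen task. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static int try_kill(const char *who)
{
        printf("trying to kill %s\n", who);
        /* pretend only the allocating task is unkillable */
        return strcmp(who, "allocating task") == 0 ? -1 : 0;
}

static const char *pick_victim(void)    /* stand-in for select_bad_process() */
{
        return "worst-scoring task";
}

static void out_of_memory(bool kill_allocating_task)
{
        if (kill_allocating_task)
                if (!try_kill("allocating task"))
                        return;         /* killed it; nothing more to do */

        /* fall through: keep picking victims until a kill succeeds */
        while (try_kill(pick_victim()))
                ;
}

int main(void)
{
        out_of_memory(true);
        return 0;
}
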
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e2f26991fff1..fe753ecf2aa5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2681,6 +2681,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 
 static int zone_batchsize(struct zone *zone)
 {
+#ifdef CONFIG_MMU
         int batch;
 
         /*
@@ -2706,9 +2707,26 @@ static int zone_batchsize(struct zone *zone)
          * of pages of one half of the possible page colors
          * and the other with pages of the other colors.
          */
-        batch = (1 << (fls(batch + batch/2)-1)) - 1;
+        batch = rounddown_pow_of_two(batch + batch/2) - 1;
 
         return batch;
+
+#else
+        /* The deferral and batching of frees should be suppressed under NOMMU
+         * conditions.
+         *
+         * The problem is that NOMMU needs to be able to allocate large chunks
+         * of contiguous memory as there's no hardware page translation to
+         * assemble apparent contiguous memory from discontiguous pages.
+         *
+         * Queueing large contiguous runs of pages for batching, however,
+         * causes the pages to actually be freed in smaller chunks.  As there
+         * can be a significant delay between the individual batches being
+         * recycled, this leads to the once large chunks of space being
+         * fragmented and becoming unavailable for high-order allocations.
+         */
+        return 0;
+#endif
 }
 
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
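
Note: in the MMU branch, (1 << (fls(batch + batch/2)-1)) - 1 and rounddown_pow_of_two(batch + batch/2) - 1 compute the same value, because 1 << (fls(x) - 1) is exactly the largest power of two not exceeding a non-zero x. Below is a user-space check of that equivalence; fls() and rounddown_pow_of_two() are re-implemented here with GCC's __builtin_clz, illustrative only.

/* User-space check that the new expression matches the old one for x > 0. */
#include <assert.h>
#include <stdio.h>

static int fls(unsigned int x)          /* "find last set", 1-based */
{
        return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned int rounddown_pow_of_two(unsigned int x)
{
        return 1u << (fls(x) - 1);
}

int main(void)
{
        for (unsigned int batch = 1; batch <= 4096; batch++) {
                unsigned int old = (1u << (fls(batch + batch/2) - 1)) - 1;
                unsigned int new = rounddown_pow_of_two(batch + batch/2) - 1;

                assert(old == new);
        }
        printf("old and new zone_batchsize() rounding agree\n");
        return 0;
}
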
diff --git a/mm/pdflush.c b/mm/pdflush.c
index f2caf96993f8..235ac440c44e 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -58,14 +58,6 @@ static DEFINE_SPINLOCK(pdflush_lock);
 int nr_pdflush_threads = 0;
 
 /*
- * The max/min number of pdflush threads. R/W by sysctl at
- * /proc/sys/vm/nr_pdflush_threads_max/min
- */
-int nr_pdflush_threads_max __read_mostly = MAX_PDFLUSH_THREADS;
-int nr_pdflush_threads_min __read_mostly = MIN_PDFLUSH_THREADS;
-
-
-/*
  * The time at which the pdflush thread pool last went empty
  */
 static unsigned long last_empty_jifs;
@@ -76,7 +68,7 @@ static unsigned long last_empty_jifs;
  * Thread pool management algorithm:
  *
  * - The minimum and maximum number of pdflush instances are bound
- *   by nr_pdflush_threads_min and nr_pdflush_threads_max.
+ *   by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
  *
  * - If there have been no idle pdflush instances for 1 second, create
  *   a new one.
@@ -142,13 +134,14 @@ static int __pdflush(struct pdflush_work *my_work)
                  * To throttle creation, we reset last_empty_jifs.
                  */
                 if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
-                        if (list_empty(&pdflush_list) &&
-                            nr_pdflush_threads < nr_pdflush_threads_max) {
-                                last_empty_jifs = jiffies;
-                                nr_pdflush_threads++;
-                                spin_unlock_irq(&pdflush_lock);
-                                start_one_pdflush_thread();
-                                spin_lock_irq(&pdflush_lock);
+                        if (list_empty(&pdflush_list)) {
+                                if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
+                                        last_empty_jifs = jiffies;
+                                        nr_pdflush_threads++;
+                                        spin_unlock_irq(&pdflush_lock);
+                                        start_one_pdflush_thread();
+                                        spin_lock_irq(&pdflush_lock);
+                                }
                         }
                 }
 
@@ -160,7 +153,7 @@ static int __pdflush(struct pdflush_work *my_work)
                  */
                 if (list_empty(&pdflush_list))
                         continue;
-                if (nr_pdflush_threads <= nr_pdflush_threads_min)
+                if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
                         continue;
                 pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
                 if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
@@ -266,9 +259,9 @@ static int __init pdflush_init(void)
          * Pre-set nr_pdflush_threads... If we fail to create,
          * the count will be decremented.
          */
-        nr_pdflush_threads = nr_pdflush_threads_min;
+        nr_pdflush_threads = MIN_PDFLUSH_THREADS;
 
-        for (i = 0; i < nr_pdflush_threads_min; i++)
+        for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
                 start_one_pdflush_thread();
         return 0;
 }
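
Note: the pdflush hunks above revert the pool bounds from the nr_pdflush_threads_min/max sysctls back to the compile-time MIN_PDFLUSH_THREADS/MAX_PDFLUSH_THREADS constants; the sizing policy described in the comment block is unchanged. Below is a stand-alone sketch of that policy; the constants and helpers are stand-ins, not the kernel's.

/* Sketch of the pdflush pool-sizing policy: grow when the pool has had no
 * idle thread for a second, shrink when a thread has been idle for a second
 * and the pool is above the minimum. */
#include <stdbool.h>
#include <stdio.h>

#define MIN_THREADS 2
#define MAX_THREADS 8

static int nr_threads = MIN_THREADS;

static void adjust_pool(bool pool_empty_for_1s, bool one_idle_for_1s)
{
        if (pool_empty_for_1s && nr_threads < MAX_THREADS) {
                nr_threads++;                   /* start_one_pdflush_thread() */
                printf("grow to %d\n", nr_threads);
        } else if (one_idle_for_1s && nr_threads > MIN_THREADS) {
                nr_threads--;                   /* surplus thread exits */
                printf("shrink to %d\n", nr_threads);
        }
}

int main(void)
{
        adjust_pool(true, false);   /* all threads busy for a second: grow */
        adjust_pool(false, true);   /* a thread idle for a second: shrink  */
        return 0;
}
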
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fab19876b4d1..083716ea38c9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -402,6 +402,7 @@ overflow:
         printk(KERN_WARNING
                 "vmap allocation for size %lu failed: "
                 "use vmalloc=<size> to increase size.\n", size);
+        kfree(va);
         return ERR_PTR(-EBUSY);
 }
 
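
Note: the one-line vmalloc.c change plugs a leak: on the overflow path, the vmap_area ('va') allocated earlier in the function was not freed before returning ERR_PTR(-EBUSY). Below is a minimal user-space illustration of the same error-path rule; the types and names are hypothetical, not the kernel's.

/* An object allocated at the top of a function must be freed before an
 * error return, otherwise it leaks. */
#include <stdio.h>
#include <stdlib.h>

struct area {                      /* hypothetical stand-in for vmap_area */
        unsigned long start, end;
};

static struct area *alloc_area(unsigned long size, unsigned long limit)
{
        struct area *va = malloc(sizeof(*va));

        if (!va)
                return NULL;

        if (size > limit) {        /* corresponds to the "overflow:" case */
                fprintf(stderr, "allocation for size %lu failed\n", size);
                free(va);          /* without this, va leaks on the error path */
                return NULL;
        }

        va->start = 0;
        va->end = size;
        return va;
}

int main(void)
{
        struct area *a = alloc_area(1UL << 20, 1UL << 16);

        if (!a)
                return 1;
        free(a);
        return 0;
}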