Diffstat (limited to 'mm')
 -rw-r--r--  mm/memory.c          | 30
 -rw-r--r--  mm/mlock.c           | 21
 -rw-r--r--  mm/mmap.c            | 24
 -rw-r--r--  mm/nommu.c           |  7
 -rw-r--r--  mm/oom_kill.c        | 16
 -rw-r--r--  mm/page-writeback.c  | 30
 -rw-r--r--  mm/rmap.c            | 19
 -rw-r--r--  mm/shmem.c           |  8
 -rw-r--r--  mm/slab.c            |  4
 9 files changed, 106 insertions(+), 53 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index b6e5fd23cc5..6b2ab105185 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2760,21 +2760,35 @@ out_release:
 }
 
 /*
- * This is like a special single-page "expand_downwards()",
- * except we must first make sure that 'address-PAGE_SIZE'
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
  * doesn't hit another vma.
- *
- * The "find_vma()" will do the right thing even if we wrap
  */
 static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
 {
 	address &= PAGE_MASK;
 	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		address -= PAGE_SIZE;
-		if (find_vma(vma->vm_mm, address) != vma)
-			return -ENOMEM;
+		struct vm_area_struct *prev = vma->vm_prev;
+
+		/*
+		 * Is there a mapping abutting this one below?
+		 *
+		 * That's only ok if it's the same stack mapping
+		 * that has gotten split..
+		 */
+		if (prev && prev->vm_end == address)
+			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+		expand_stack(vma, address - PAGE_SIZE);
+	}
+	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+		struct vm_area_struct *next = vma->vm_next;
+
+		/* As VM_GROWSDOWN but s/below/above/ */
+		if (next && next->vm_start == address + PAGE_SIZE)
+			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
 
-		expand_stack(vma, address);
+		expand_upwards(vma, address + PAGE_SIZE);
 	}
 	return 0;
 }
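
The rework above replaces an rbtree lookup (find_vma()) with a direct look at the new vm_prev link, and adds the symmetric check for grow-up stacks. A minimal userspace sketch of the grow-down half, with made-up stand-in types and flag values (not the kernel's):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE    4096UL
#define PAGE_MASK    (~(PAGE_SIZE - 1))
#define VM_GROWSDOWN 0x01UL   /* illustrative value only */

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma *vm_prev, *vm_next;
};

/* 0: fault may proceed (possibly growing the stack); -1: refuse (-ENOMEM). */
static int guard_check_growsdown(struct vma *vma, unsigned long address)
{
	address &= PAGE_MASK;
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		struct vma *prev = vma->vm_prev;

		/* A mapping abutting below is only ok if it is stack too,
		 * i.e. the same stack vma that has been split. */
		if (prev && prev->vm_end == address)
			return (prev->vm_flags & VM_GROWSDOWN) ? 0 : -1;
		/* otherwise the kernel would expand_stack() here */
	}
	return 0;
}

int main(void)
{
	struct vma lib   = { 0x100000, 0x101000, 0, NULL, NULL };
	struct vma stack = { 0x101000, 0x105000, VM_GROWSDOWN, &lib, NULL };

	/* faulting on the first stack page must not run into 'lib' */
	assert(guard_check_growsdown(&stack, stack.vm_start) == -1);
	return 0;
}
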
diff --git a/mm/mlock.c b/mm/mlock.c
index 49e5e4cb823..cbae7c5b956 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page)
 	}
 }
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_stack_continue(vma->vm_prev, addr);
+}
+
 /**
  * __mlock_vma_pages_range() - mlock a range of pages in the vma.
  * @vma: target vma
@@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		gup_flags |= FOLL_WRITE;
 
 	/* We don't try to access the guard page of a stack vma */
-	if (vma->vm_flags & VM_GROWSDOWN) {
-		if (start == vma->vm_start) {
-			start += PAGE_SIZE;
-			nr_pages--;
-		}
+	if (stack_guard_page(vma, start)) {
+		addr += PAGE_SIZE;
+		nr_pages--;
 	}
 
 	while (nr_pages > 0) {
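
The new stack_guard_page() predicate makes the mlock skip stricter than the old blanket VM_GROWSDOWN test: the first page is only treated as a guard page when the vma is not merely the upper half of a split stack mapping. A small self-contained demo of the predicate, with toy types and hypothetical addresses:

#include <assert.h>
#include <stddef.h>

#define VM_GROWSDOWN 0x01UL   /* illustrative value only */

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma *vm_prev;
};

static int vma_stack_continue(struct vma *vma, unsigned long addr)
{
	return vma && vma->vm_end == addr && (vma->vm_flags & VM_GROWSDOWN);
}

static int stack_guard_page(struct vma *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		vma->vm_start == addr &&
		!vma_stack_continue(vma->vm_prev, addr);
}

int main(void)
{
	struct vma lower = { 0x7f0000, 0x7f4000, VM_GROWSDOWN, NULL };
	struct vma upper = { 0x7f4000, 0x7f8000, VM_GROWSDOWN, &lower };

	assert(stack_guard_page(&lower, lower.vm_start));  /* real guard page */
	assert(!stack_guard_page(&upper, upper.vm_start)); /* split stack: mlock it */
	return 0;
}
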
diff --git a/mm/mmap.c b/mm/mmap.c
index 31003338b97..6128dc8e5ed 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -388,17 +388,23 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+	struct vm_area_struct *next;
+
+	vma->vm_prev = prev;
 	if (prev) {
-		vma->vm_next = prev->vm_next;
+		next = prev->vm_next;
 		prev->vm_next = vma;
 	} else {
 		mm->mmap = vma;
 		if (rb_parent)
-			vma->vm_next = rb_entry(rb_parent,
+			next = rb_entry(rb_parent,
 					struct vm_area_struct, vm_rb);
 		else
-			vma->vm_next = NULL;
+			next = NULL;
 	}
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -483,7 +489,11 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev)
 {
-	prev->vm_next = vma->vm_next;
+	struct vm_area_struct *next = vma->vm_next;
+
+	prev->vm_next = next;
+	if (next)
+		next->vm_prev = prev;
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
 	if (mm->mmap_cache == vma)
 		mm->mmap_cache = prev;
@@ -1706,9 +1716,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end. Have to extend vma.
  */
-#ifndef CONFIG_IA64
-static
-#endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	int error;
@@ -1915,6 +1922,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long addr;
 
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+	vma->vm_prev = NULL;
 	do {
 		rb_erase(&vma->vm_rb, &mm->mm_rb);
 		mm->map_count--;
@@ -1922,6 +1930,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 		vma = vma->vm_next;
 	} while (vma && vma->vm_start < end);
 	*insertion_point = vma;
+	if (vma)
+		vma->vm_prev = prev;
 	tail_vma->vm_next = NULL;
 	if (mm->unmap_area == arch_unmap_area)
 		addr = prev ? prev->vm_end : mm->mmap_base;
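
All four mmap.c list hunks serve one change: vm_next gains a companion vm_prev, so every place that splices the vma list must maintain the back link too. A compact userspace model of the three operations touched here, assuming simplified types (the rbtree and arch details are omitted):

#include <assert.h>
#include <stddef.h>

struct vma {
	struct vma *vm_next, *vm_prev;
};

/* Like __vma_link_list(): insert 'vma' after 'prev' (NULL prev = new head). */
static void link_vma(struct vma **head, struct vma *vma, struct vma *prev)
{
	struct vma *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = *head;   /* the kernel derives this from the rbtree */
		*head = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Like __vma_unlink(): remove 'vma', whose predecessor is 'prev'. */
static void unlink_vma(struct vma *vma, struct vma *prev)
{
	struct vma *next = vma->vm_next;

	prev->vm_next = next;
	if (next)
		next->vm_prev = prev;
}

/* Like detach_vmas_to_be_unmapped(): splice out the run [vma .. tail]. */
static void detach_range(struct vma **head, struct vma *vma,
			 struct vma *tail, struct vma *prev)
{
	struct vma **insertion_point = prev ? &prev->vm_next : head;
	struct vma *rest = tail->vm_next;

	vma->vm_prev = NULL;
	*insertion_point = rest;
	if (rest)
		rest->vm_prev = prev;   /* the two lines the hunk above adds */
	tail->vm_next = NULL;
}

int main(void)
{
	struct vma a = {0}, b = {0}, c = {0}, *head = NULL;

	link_vma(&head, &a, NULL);
	link_vma(&head, &b, &a);
	link_vma(&head, &c, &b);
	assert(c.vm_prev == &b && b.vm_prev == &a && a.vm_prev == NULL);

	detach_range(&head, &b, &b, &a);   /* drop just 'b' */
	assert(a.vm_next == &c && c.vm_prev == &a);

	unlink_vma(&c, &a);
	assert(a.vm_next == NULL);
	return 0;
}
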
diff --git a/mm/nommu.c b/mm/nommu.c
index efa9a380335..88ff091eb07 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -604,7 +604,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	struct vm_area_struct *pvma, **pp;
+	struct vm_area_struct *pvma, **pp, *next;
 	struct address_space *mapping;
 	struct rb_node **p, *parent;
 
@@ -664,8 +664,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 			break;
 	}
 
-	vma->vm_next = *pp;
+	next = *pp;
 	*pp = vma;
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
 }
 
 /*
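
The nommu variant reaches the insertion slot through a pointer-to-pointer walk instead of an rbtree, but needs the same back-link fix. A sketch of that idiom, reduced to the essentials (the kernel's sort key and vm_prev bookkeeping are more involved):

#include <assert.h>
#include <stddef.h>

struct vma {
	unsigned long vm_start;
	struct vma *vm_next, *vm_prev;
};

static void insert_sorted(struct vma **head, struct vma *vma)
{
	struct vma **pp = head, *prev = NULL, *next;

	while (*pp && (*pp)->vm_start < vma->vm_start) {
		prev = *pp;
		pp = &(*pp)->vm_next;
	}

	next = *pp;             /* was: vma->vm_next = *pp; (forward link only) */
	*pp = vma;
	vma->vm_prev = prev;
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;   /* the line the patch adds */
}

int main(void)
{
	struct vma a = { 0x1000, NULL, NULL }, c = { 0x3000, NULL, NULL };
	struct vma b = { 0x2000, NULL, NULL }, *head = NULL;

	insert_sorted(&head, &a);
	insert_sorted(&head, &c);
	insert_sorted(&head, &b);   /* lands between a and c */
	assert(a.vm_next == &b && c.vm_prev == &b && b.vm_prev == &a);
	return 0;
}
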
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5014e50644d..fc81cb22869 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -372,7 +372,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
 	}
 
 	pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n",
-		task->pid, __task_cred(task)->uid, task->tgid,
+		task->pid, task_uid(task), task->tgid,
 		task->mm->total_vm, get_mm_rss(task->mm),
 		task_cpu(task), task->signal->oom_adj,
 		task->signal->oom_score_adj, task->comm);
@@ -401,10 +401,9 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 {
 	p = find_lock_task_mm(p);
-	if (!p) {
-		task_unlock(p);
+	if (!p)
 		return 1;
-	}
+
 	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
 		task_pid_nr(p), p->comm, K(p->mm->total_vm),
 		K(get_mm_counter(p->mm, MM_ANONPAGES)),
@@ -647,6 +646,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	unsigned long freed = 0;
 	unsigned int points;
 	enum oom_constraint constraint = CONSTRAINT_NONE;
+	int killed = 0;
 
 	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
 	if (freed > 0)
@@ -684,7 +684,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
 				NULL, nodemask,
 				"Out of memory (oom_kill_allocating_task)"))
-			return;
+			goto out;
 	}
 
 retry:
@@ -692,7 +692,7 @@ retry:
 			constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
 			NULL);
 	if (PTR_ERR(p) == -1UL)
-		return;
+		goto out;
 
 	/* Found nothing?!?! Either we hang forever, or we panic. */
 	if (!p) {
@@ -704,13 +704,15 @@ retry:
 	if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
 				nodemask, "Out of memory"))
 		goto retry;
+	killed = 1;
+out:
 	read_unlock(&tasklist_lock);
 
 	/*
 	 * Give "p" a good chance of killing itself before we
 	 * retry to allocate memory unless "p" is current
 	 */
-	if (!test_thread_flag(TIF_MEMDIE))
+	if (killed && !test_thread_flag(TIF_MEMDIE))
 		schedule_timeout_uninterruptible(1);
 }
 
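
Taken together, the oom_kill.c hunks fix two things: the early returns used to leave the function without reaching the read_unlock(&tasklist_lock) below them, and the post-kill sleep used to run even when nothing had been killed. A control-flow sketch with stubbed stand-ins (none of these helper names are the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for kernel primitives, for illustration only. */
static void lock_tasklist(void)           { puts("read_lock(tasklist_lock)"); }
static void unlock_tasklist(void)         { puts("read_unlock(tasklist_lock)"); }
static bool kill_allocating_task(void)    { return true; }  /* true = killed */
static bool kill_selected_victim(void)    { return true; }
static void wait_for_victim_to_exit(void) { puts("schedule_timeout(1)"); }

static void oom_model(bool prefer_allocating_task)
{
	bool killed = false;

	lock_tasklist();
	if (prefer_allocating_task && kill_allocating_task())
		goto out;            /* was a bare 'return': skipped the unlock */

	while (!kill_selected_victim())
		;                    /* 'goto retry' in the real code */
	killed = true;
out:
	unlock_tasklist();           /* now reached on every path */
	if (killed)                  /* sleep only when a victim was chosen */
		wait_for_victim_to_exit();
}

int main(void)
{
	oom_model(true);    /* killed current: unlock, no sleep */
	oom_model(false);   /* killed a victim: unlock, then sleep */
	return 0;
}
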
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7262aacea8a..e3bccac1f02 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -836,7 +836,8 @@ void tag_pages_for_writeback(struct address_space *mapping,
836 spin_unlock_irq(&mapping->tree_lock); 836 spin_unlock_irq(&mapping->tree_lock);
837 WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); 837 WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
838 cond_resched(); 838 cond_resched();
839 } while (tagged >= WRITEBACK_TAG_BATCH); 839 /* We check 'start' to handle wrapping when end == ~0UL */
840 } while (tagged >= WRITEBACK_TAG_BATCH && start);
840} 841}
841EXPORT_SYMBOL(tag_pages_for_writeback); 842EXPORT_SYMBOL(tag_pages_for_writeback);
842 843
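
The extra '&& start' termination test guards against pgoff_t wraparound: the batched tagging helper advances 'start' past the last index it processed, so after processing index ~0UL (the "whole file" end value callers pass) 'start' wraps to 0. A tiny arithmetic demo, modelling pgoff_t as unsigned long:

#include <stdio.h>

int main(void)
{
	unsigned long end = ~0UL;            /* tag "to the end of the file" */
	unsigned long next_start = end + 1;  /* wraps to 0 */

	printf("next_start = %lu\n", next_start);
	if (!next_start)
		puts("wrapped: the '&& start' test ends the loop here");
	return 0;
}
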
@@ -984,22 +985,16 @@ continue_unlock:
 			}
 		}
 
-		if (wbc->nr_to_write > 0) {
-			if (--wbc->nr_to_write == 0 &&
-			    wbc->sync_mode == WB_SYNC_NONE) {
-				/*
-				 * We stop writing back only if we are
-				 * not doing integrity sync. In case of
-				 * integrity sync we have to keep going
-				 * because someone may be concurrently
-				 * dirtying pages, and we might have
-				 * synced a lot of newly appeared dirty
-				 * pages, but have not synced all of the
-				 * old dirty pages.
-				 */
-				done = 1;
-				break;
-			}
+		/*
+		 * We stop writing back only if we are not doing
+		 * integrity sync. In case of integrity sync we have to
+		 * keep going until we have written all the pages
+		 * we tagged for writeback prior to entering this loop.
+		 */
+		if (--wbc->nr_to_write <= 0 &&
+		    wbc->sync_mode == WB_SYNC_NONE) {
+			done = 1;
+			break;
 		}
 	}
 	pagevec_release(&pvec);
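
Besides dropping the outer guard, the rewritten test changes '== 0' to '<= 0'. That matters because a filesystem's ->writepage can clean more than one page per call and step the counter past zero without ever touching it at exactly zero. An illustrative comparison of the two stop predicates over a few counter values (the old code's outer 'nr_to_write > 0' guard is folded into the predicate for brevity):

#include <stdio.h>

int main(void)
{
	long samples[] = { 2, 1, 0, -1 };  /* nr_to_write before the decrement */

	for (int i = 0; i < 4; i++) {
		long n = samples[i];
		printf("nr_to_write=%2ld  old '--n == 0' stops: %d   new '--n <= 0' stops: %d\n",
		       n, (n - 1) == 0, (n - 1) <= 0);
	}
	return 0;
}
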
@@ -1131,6 +1126,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		task_io_account_write(PAGE_CACHE_SIZE);
 	}
 }
+EXPORT_SYMBOL(account_page_dirtied);
 
 /*
  * For address_spaces which do not use buffers. Just tag the page as dirty in
diff --git a/mm/rmap.c b/mm/rmap.c
index 87b9e8ad450..f6f0d2dda2e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -316,7 +316,7 @@ void __init anon_vma_init(void)
  */
 struct anon_vma *page_lock_anon_vma(struct page *page)
 {
-	struct anon_vma *anon_vma;
+	struct anon_vma *anon_vma, *root_anon_vma;
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
@@ -327,8 +327,21 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-	anon_vma_lock(anon_vma);
-	return anon_vma;
+	root_anon_vma = ACCESS_ONCE(anon_vma->root);
+	spin_lock(&root_anon_vma->lock);
+
+	/*
+	 * If this page is still mapped, then its anon_vma cannot have been
+	 * freed. But if it has been unmapped, we have no security against
+	 * the anon_vma structure being freed and reused (for another anon_vma:
+	 * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
+	 * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
+	 * anon_vma->root before page_unlock_anon_vma() is called to unlock.
+	 */
+	if (page_mapped(page))
+		return anon_vma;
+
+	spin_unlock(&root_anon_vma->lock);
 out:
 	rcu_read_unlock();
 	return NULL;
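
The new page_lock_anon_vma() follows the classic SLAB_DESTROY_BY_RCU discipline: under rcu_read_lock() the anon_vma may be freed and reused as another anon_vma, but its memory stays type-stable, so taking the root lock is safe; what must follow is a re-check (page_mapped() here) that the object still means what we think, backing off if not. A structural sketch of that check-after-lock pattern with no-op stand-in primitives (hypothetical names, not the kernel API):

#include <stdbool.h>
#include <stddef.h>

struct root_lock { int held; };
struct object    { struct root_lock *root; };

static void rcu_read_lock(void)              {}
static void rcu_read_unlock(void)            {}
static void spin_lock(struct root_lock *l)   { l->held = 1; }
static void spin_unlock(struct root_lock *l) { l->held = 0; }
static bool object_still_live(void)          { return true; } /* page_mapped() stand-in */

/* Returns the object with rcu + root lock held, or NULL with both dropped. */
static struct object *lookup_and_lock(struct object *candidate)
{
	rcu_read_lock();
	if (candidate) {
		struct root_lock *root = candidate->root; /* ACCESS_ONCE() in the kernel */

		spin_lock(root);        /* safe: memory is type-stable under RCU */
		if (object_still_live())
			return candidate;
		spin_unlock(root);      /* object was recycled: back off */
	}
	rcu_read_unlock();
	return NULL;
}

int main(void)
{
	struct root_lock rl = { 0 };
	struct object obj = { &rl };

	if (lookup_and_lock(&obj)) {
		spin_unlock(obj.root);
		rcu_read_unlock();
	}
	return 0;
}
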
diff --git a/mm/shmem.c b/mm/shmem.c
index dfaa0f4e978..080b09a57a8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2325,7 +2325,10 @@ static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
 
 static void shmem_put_super(struct super_block *sb)
 {
-	kfree(sb->s_fs_info);
+	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+
+	percpu_counter_destroy(&sbinfo->used_blocks);
+	kfree(sbinfo);
 	sb->s_fs_info = NULL;
 }
 
@@ -2367,7 +2370,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
 #endif
 
 	spin_lock_init(&sbinfo->stat_lock);
-	percpu_counter_init(&sbinfo->used_blocks, 0);
+	if (percpu_counter_init(&sbinfo->used_blocks, 0))
+		goto failed;
 	sbinfo->free_inodes = sbinfo->max_inodes;
 
 	sb->s_maxbytes = SHMEM_MAX_BYTES;
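
Both shmem hunks are the two halves of one rule: percpu_counter_init() allocates per-cpu storage, so its return value must be checked at mount, and the allocation must be released with percpu_counter_destroy() when the superblock goes away. A userspace sketch of the pairing with a toy counter that allocates (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct pcounter { long *percpu; };   /* toy stand-in for percpu_counter */

static int pcounter_init(struct pcounter *c, long value)
{
	c->percpu = malloc(64 * sizeof(long));
	if (!c->percpu)
		return -1;              /* init can fail: check it (fill_super) */
	c->percpu[0] = value;
	return 0;
}

static void pcounter_destroy(struct pcounter *c)
{
	free(c->percpu);                /* must run before freeing the owner */
	c->percpu = NULL;
}

int main(void)
{
	struct pcounter used_blocks;

	if (pcounter_init(&used_blocks, 0))   /* mirrors the 'goto failed' path */
		return 1;
	/* ... filesystem lifetime ... */
	pcounter_destroy(&used_blocks);       /* mirrors shmem_put_super() */
	return 0;
}
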
diff --git a/mm/slab.c b/mm/slab.c
index 88435fcc838..fcae9815d3b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2330,8 +2330,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - size;
+	    && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
 		size = PAGE_SIZE;
 	}
 #endif
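
The slab fix substitutes ALIGN(size, align) for the raw size in the FORCED_DEBUG relocation that pushes an object to the end of its page: subtracting the unaligned size can yield an offset that is not a multiple of the cache's alignment, silently breaking the alignment guarantee. A quick arithmetic check with illustrative numbers:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define PAGE_SIZE   4096UL

int main(void)
{
	unsigned long size = 3000, align = 256;

	unsigned long old_off = PAGE_SIZE - size;               /* 1096 */
	unsigned long new_off = PAGE_SIZE - ALIGN(size, align); /* 1024 */

	printf("old offset %lu %% align = %lu (misaligned)\n", old_off, old_off % align);
	printf("new offset %lu %% align = %lu (aligned)\n",    new_off, new_off % align);
	return 0;
}
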