diff options
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/memory.c | 15 | ||||
| -rw-r--r-- | mm/mlock.c | 21 | ||||
| -rw-r--r-- | mm/mmap.c | 21 | ||||
| -rw-r--r-- | mm/nommu.c | 7 | ||||
| -rw-r--r-- | mm/oom_kill.c | 16 | ||||
| -rw-r--r-- | mm/page-writeback.c | 3 |
6 files changed, 60 insertions, 23 deletions
diff --git a/mm/memory.c b/mm/memory.c index b6e5fd23cc5a..2ed2267439df 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -2770,11 +2770,18 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) | |||
| 2770 | { | 2770 | { |
| 2771 | address &= PAGE_MASK; | 2771 | address &= PAGE_MASK; |
| 2772 | if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { | 2772 | if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { |
| 2773 | address -= PAGE_SIZE; | 2773 | struct vm_area_struct *prev = vma->vm_prev; |
| 2774 | if (find_vma(vma->vm_mm, address) != vma) | 2774 | |
| 2775 | return -ENOMEM; | 2775 | /* |
| 2776 | * Is there a mapping abutting this one below? | ||
| 2777 | * | ||
| 2778 | * That's only ok if it's the same stack mapping | ||
| 2779 | * that has gotten split.. | ||
| 2780 | */ | ||
| 2781 | if (prev && prev->vm_end == address) | ||
| 2782 | return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; | ||
| 2776 | 2783 | ||
| 2777 | expand_stack(vma, address); | 2784 | expand_stack(vma, address - PAGE_SIZE); |
| 2778 | } | 2785 | } |
| 2779 | return 0; | 2786 | return 0; |
| 2780 | } | 2787 | } |
diff --git a/mm/mlock.c b/mm/mlock.c index 49e5e4cb8232..cbae7c5b9568 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
| @@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page) | |||
| 135 | } | 135 | } |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | /* Is the vma a continuation of the stack vma above it? */ | ||
| 139 | static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) | ||
| 140 | { | ||
| 141 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | ||
| 142 | } | ||
| 143 | |||
| 144 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) | ||
| 145 | { | ||
| 146 | return (vma->vm_flags & VM_GROWSDOWN) && | ||
| 147 | (vma->vm_start == addr) && | ||
| 148 | !vma_stack_continue(vma->vm_prev, addr); | ||
| 149 | } | ||
| 150 | |||
| 138 | /** | 151 | /** |
| 139 | * __mlock_vma_pages_range() - mlock a range of pages in the vma. | 152 | * __mlock_vma_pages_range() - mlock a range of pages in the vma. |
| 140 | * @vma: target vma | 153 | * @vma: target vma |
| @@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, | |||
| 168 | gup_flags |= FOLL_WRITE; | 181 | gup_flags |= FOLL_WRITE; |
| 169 | 182 | ||
| 170 | /* We don't try to access the guard page of a stack vma */ | 183 | /* We don't try to access the guard page of a stack vma */ |
| 171 | if (vma->vm_flags & VM_GROWSDOWN) { | 184 | if (stack_guard_page(vma, start)) { |
| 172 | if (start == vma->vm_start) { | 185 | addr += PAGE_SIZE; |
| 173 | start += PAGE_SIZE; | 186 | nr_pages--; |
| 174 | nr_pages--; | ||
| 175 | } | ||
| 176 | } | 187 | } |
| 177 | 188 | ||
| 178 | while (nr_pages > 0) { | 189 | while (nr_pages > 0) { |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
| @@ -388,17 +388,23 @@ static inline void | |||
| 388 | __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, | 388 | __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, |
| 389 | struct vm_area_struct *prev, struct rb_node *rb_parent) | 389 | struct vm_area_struct *prev, struct rb_node *rb_parent) |
| 390 | { | 390 | { |
| 391 | struct vm_area_struct *next; | ||
| 392 | |||
| 393 | vma->vm_prev = prev; | ||
| 391 | if (prev) { | 394 | if (prev) { |
| 392 | vma->vm_next = prev->vm_next; | 395 | next = prev->vm_next; |
| 393 | prev->vm_next = vma; | 396 | prev->vm_next = vma; |
| 394 | } else { | 397 | } else { |
| 395 | mm->mmap = vma; | 398 | mm->mmap = vma; |
| 396 | if (rb_parent) | 399 | if (rb_parent) |
| 397 | vma->vm_next = rb_entry(rb_parent, | 400 | next = rb_entry(rb_parent, |
| 398 | struct vm_area_struct, vm_rb); | 401 | struct vm_area_struct, vm_rb); |
| 399 | else | 402 | else |
| 400 | vma->vm_next = NULL; | 403 | next = NULL; |
| 401 | } | 404 | } |
| 405 | vma->vm_next = next; | ||
| 406 | if (next) | ||
| 407 | next->vm_prev = vma; | ||
| 402 | } | 408 | } |
| 403 | 409 | ||
| 404 | void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, | 410 | void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, |
| @@ -483,7 +489,11 @@ static inline void | |||
| 483 | __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, | 489 | __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, |
| 484 | struct vm_area_struct *prev) | 490 | struct vm_area_struct *prev) |
| 485 | { | 491 | { |
| 486 | prev->vm_next = vma->vm_next; | 492 | struct vm_area_struct *next = vma->vm_next; |
| 493 | |||
| 494 | prev->vm_next = next; | ||
| 495 | if (next) | ||
| 496 | next->vm_prev = prev; | ||
| 487 | rb_erase(&vma->vm_rb, &mm->mm_rb); | 497 | rb_erase(&vma->vm_rb, &mm->mm_rb); |
| 488 | if (mm->mmap_cache == vma) | 498 | if (mm->mmap_cache == vma) |
| 489 | mm->mmap_cache = prev; | 499 | mm->mmap_cache = prev; |
| @@ -1915,6 +1925,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1915 | unsigned long addr; | 1925 | unsigned long addr; |
| 1916 | 1926 | ||
| 1917 | insertion_point = (prev ? &prev->vm_next : &mm->mmap); | 1927 | insertion_point = (prev ? &prev->vm_next : &mm->mmap); |
| 1928 | vma->vm_prev = NULL; | ||
| 1918 | do { | 1929 | do { |
| 1919 | rb_erase(&vma->vm_rb, &mm->mm_rb); | 1930 | rb_erase(&vma->vm_rb, &mm->mm_rb); |
| 1920 | mm->map_count--; | 1931 | mm->map_count--; |
| @@ -1922,6 +1933,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1922 | vma = vma->vm_next; | 1933 | vma = vma->vm_next; |
| 1923 | } while (vma && vma->vm_start < end); | 1934 | } while (vma && vma->vm_start < end); |
| 1924 | *insertion_point = vma; | 1935 | *insertion_point = vma; |
| 1936 | if (vma) | ||
| 1937 | vma->vm_prev = prev; | ||
| 1925 | tail_vma->vm_next = NULL; | 1938 | tail_vma->vm_next = NULL; |
| 1926 | if (mm->unmap_area == arch_unmap_area) | 1939 | if (mm->unmap_area == arch_unmap_area) |
| 1927 | addr = prev ? prev->vm_end : mm->mmap_base; | 1940 | addr = prev ? prev->vm_end : mm->mmap_base; |
diff --git a/mm/nommu.c b/mm/nommu.c index efa9a380335e..88ff091eb07a 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
| @@ -604,7 +604,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags) | |||
| 604 | */ | 604 | */ |
| 605 | static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) | 605 | static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) |
| 606 | { | 606 | { |
| 607 | struct vm_area_struct *pvma, **pp; | 607 | struct vm_area_struct *pvma, **pp, *next; |
| 608 | struct address_space *mapping; | 608 | struct address_space *mapping; |
| 609 | struct rb_node **p, *parent; | 609 | struct rb_node **p, *parent; |
| 610 | 610 | ||
| @@ -664,8 +664,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) | |||
| 664 | break; | 664 | break; |
| 665 | } | 665 | } |
| 666 | 666 | ||
| 667 | vma->vm_next = *pp; | 667 | next = *pp; |
| 668 | *pp = vma; | 668 | *pp = vma; |
| 669 | vma->vm_next = next; | ||
| 670 | if (next) | ||
| 671 | next->vm_prev = vma; | ||
| 669 | } | 672 | } |
| 670 | 673 | ||
| 671 | /* | 674 | /* |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 5014e50644d1..fc81cb22869e 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -372,7 +372,7 @@ static void dump_tasks(const struct mem_cgroup *mem) | |||
| 372 | } | 372 | } |
| 373 | 373 | ||
| 374 | pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n", | 374 | pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n", |
| 375 | task->pid, __task_cred(task)->uid, task->tgid, | 375 | task->pid, task_uid(task), task->tgid, |
| 376 | task->mm->total_vm, get_mm_rss(task->mm), | 376 | task->mm->total_vm, get_mm_rss(task->mm), |
| 377 | task_cpu(task), task->signal->oom_adj, | 377 | task_cpu(task), task->signal->oom_adj, |
| 378 | task->signal->oom_score_adj, task->comm); | 378 | task->signal->oom_score_adj, task->comm); |
| @@ -401,10 +401,9 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, | |||
| 401 | static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) | 401 | static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) |
| 402 | { | 402 | { |
| 403 | p = find_lock_task_mm(p); | 403 | p = find_lock_task_mm(p); |
| 404 | if (!p) { | 404 | if (!p) |
| 405 | task_unlock(p); | ||
| 406 | return 1; | 405 | return 1; |
| 407 | } | 406 | |
| 408 | pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", | 407 | pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", |
| 409 | task_pid_nr(p), p->comm, K(p->mm->total_vm), | 408 | task_pid_nr(p), p->comm, K(p->mm->total_vm), |
| 410 | K(get_mm_counter(p->mm, MM_ANONPAGES)), | 409 | K(get_mm_counter(p->mm, MM_ANONPAGES)), |
| @@ -647,6 +646,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
| 647 | unsigned long freed = 0; | 646 | unsigned long freed = 0; |
| 648 | unsigned int points; | 647 | unsigned int points; |
| 649 | enum oom_constraint constraint = CONSTRAINT_NONE; | 648 | enum oom_constraint constraint = CONSTRAINT_NONE; |
| 649 | int killed = 0; | ||
| 650 | 650 | ||
| 651 | blocking_notifier_call_chain(&oom_notify_list, 0, &freed); | 651 | blocking_notifier_call_chain(&oom_notify_list, 0, &freed); |
| 652 | if (freed > 0) | 652 | if (freed > 0) |
| @@ -684,7 +684,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
| 684 | if (!oom_kill_process(current, gfp_mask, order, 0, totalpages, | 684 | if (!oom_kill_process(current, gfp_mask, order, 0, totalpages, |
| 685 | NULL, nodemask, | 685 | NULL, nodemask, |
| 686 | "Out of memory (oom_kill_allocating_task)")) | 686 | "Out of memory (oom_kill_allocating_task)")) |
| 687 | return; | 687 | goto out; |
| 688 | } | 688 | } |
| 689 | 689 | ||
| 690 | retry: | 690 | retry: |
| @@ -692,7 +692,7 @@ retry: | |||
| 692 | constraint == CONSTRAINT_MEMORY_POLICY ? nodemask : | 692 | constraint == CONSTRAINT_MEMORY_POLICY ? nodemask : |
| 693 | NULL); | 693 | NULL); |
| 694 | if (PTR_ERR(p) == -1UL) | 694 | if (PTR_ERR(p) == -1UL) |
| 695 | return; | 695 | goto out; |
| 696 | 696 | ||
| 697 | /* Found nothing?!?! Either we hang forever, or we panic. */ | 697 | /* Found nothing?!?! Either we hang forever, or we panic. */ |
| 698 | if (!p) { | 698 | if (!p) { |
| @@ -704,13 +704,15 @@ retry: | |||
| 704 | if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL, | 704 | if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL, |
| 705 | nodemask, "Out of memory")) | 705 | nodemask, "Out of memory")) |
| 706 | goto retry; | 706 | goto retry; |
| 707 | killed = 1; | ||
| 708 | out: | ||
| 707 | read_unlock(&tasklist_lock); | 709 | read_unlock(&tasklist_lock); |
| 708 | 710 | ||
| 709 | /* | 711 | /* |
| 710 | * Give "p" a good chance of killing itself before we | 712 | * Give "p" a good chance of killing itself before we |
| 711 | * retry to allocate memory unless "p" is current | 713 | * retry to allocate memory unless "p" is current |
| 712 | */ | 714 | */ |
| 713 | if (!test_thread_flag(TIF_MEMDIE)) | 715 | if (killed && !test_thread_flag(TIF_MEMDIE)) |
| 714 | schedule_timeout_uninterruptible(1); | 716 | schedule_timeout_uninterruptible(1); |
| 715 | } | 717 | } |
| 716 | 718 | ||
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 7262aacea8a2..c09ef5219cbe 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -836,7 +836,8 @@ void tag_pages_for_writeback(struct address_space *mapping, | |||
| 836 | spin_unlock_irq(&mapping->tree_lock); | 836 | spin_unlock_irq(&mapping->tree_lock); |
| 837 | WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); | 837 | WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); |
| 838 | cond_resched(); | 838 | cond_resched(); |
| 839 | } while (tagged >= WRITEBACK_TAG_BATCH); | 839 | /* We check 'start' to handle wrapping when end == ~0UL */ |
| 840 | } while (tagged >= WRITEBACK_TAG_BATCH && start); | ||
| 840 | } | 841 | } |
| 841 | EXPORT_SYMBOL(tag_pages_for_writeback); | 842 | EXPORT_SYMBOL(tag_pages_for_writeback); |
| 842 | 843 | ||
