Diffstat (limited to 'mm/mmap.c')
 mm/mmap.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 50a4aa0255a0..2ec8eb5a9cdd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -29,6 +29,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/perf_event.h>
 #include <linux/audit.h>
+#include <linux/khugepaged.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -253,7 +254,15 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	down_write(&mm->mmap_sem);
 
 #ifdef CONFIG_COMPAT_BRK
-	min_brk = mm->end_code;
+	/*
+	 * CONFIG_COMPAT_BRK can still be overridden by setting
+	 * randomize_va_space to 2, which will still cause mm->start_brk
+	 * to be arbitrarily shifted
+	 */
+	if (mm->start_brk > PAGE_ALIGN(mm->end_data))
+		min_brk = mm->start_brk;
+	else
+		min_brk = mm->end_data;
 #else
 	min_brk = mm->start_brk;
 #endif
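
For readers of the hunk above: with CONFIG_COMPAT_BRK, the patch now uses mm->start_brk as the floor for brk(2) whenever brk randomization has pushed it above the page-aligned end of the data segment, falling back to mm->end_data otherwise (instead of the old mm->end_code). A minimal user-space sketch of that selection, assuming 4 KiB pages and plain unsigned long addresses; the program, the min_brk_compat() helper, and the example values are illustrative, not kernel code:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's page-size constants. */
#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/*
 * Mirrors the CONFIG_COMPAT_BRK branch above: if start_brk was shifted
 * above the page-aligned end of the data segment (e.g. by heap
 * randomization), use it as the lower bound; otherwise use end_data.
 */
static unsigned long min_brk_compat(unsigned long start_brk,
				    unsigned long end_data)
{
	if (start_brk > PAGE_ALIGN(end_data))
		return start_brk;
	return end_data;
}

int main(void)
{
	/* Randomized layout: start_brk sits above the data segment. */
	printf("%#lx\n", min_brk_compat(0x602000UL, 0x600f00UL));
	/* Unrandomized layout: start_brk abuts the data segment. */
	printf("%#lx\n", min_brk_compat(0x601000UL, 0x600f00UL));
	return 0;
}

With the first pair the shifted start_brk becomes the floor; with the second it does not, so the heap can still begin right after the data segment as before.
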
@@ -588,6 +597,8 @@ again: remove_next = 1 + (end > next->vm_end);
 		}
 	}
 
+	vma_adjust_trans_huge(vma, start, end, adjust_next);
+
 	/*
 	 * When changing only vma->vm_end, we don't really need anon_vma
 	 * lock. This is a fairly rare case by itself, but the anon_vma
@@ -815,6 +826,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 			end, prev->vm_pgoff, NULL);
 		if (err)
 			return NULL;
+		khugepaged_enter_vma_merge(prev);
 		return prev;
 	}
 
@@ -833,6 +845,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 			next->vm_pgoff - pglen, NULL);
 		if (err)
 			return NULL;
+		khugepaged_enter_vma_merge(area);
 		return area;
 	}
 
@@ -1761,6 +1774,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		}
 	}
 	vma_unlock_anon_vma(vma);
+	khugepaged_enter_vma_merge(vma);
 	return error;
 }
 #endif	/* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1808,6 +1822,7 @@ static int expand_downwards(struct vm_area_struct *vma,
 		}
 	}
 	vma_unlock_anon_vma(vma);
+	khugepaged_enter_vma_merge(vma);
 	return error;
 }
 
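
For context only (not part of the patch): a tiny user-space program that exercises the brk path whose lower bound the first hunk adjusts. It asks the kernel to move the program break up by one page via sbrk(), which glibc implements on top of brk(2), the syscall serviced by the SYSCALL_DEFINE1(brk, ...) handler patched above:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *old_brk = sbrk(0);		/* current program break */

	if (sbrk(4096) == (void *)-1) {		/* grow the heap by one page */
		perror("sbrk");
		return 1;
	}
	printf("program break moved from %p to %p\n", old_brk, sbrk(0));
	return 0;
}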