diff options
author | Andrea Arcangeli <aarcange@redhat.com> | 2011-02-15 13:02:45 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-02-15 18:21:11 -0500 |
commit | a7d6e4ecdb7648478ddec76d30d87d03d6e22b31 (patch) | |
tree | 1e0110780ac0a8eeef2629e1d5880602bd6003c0 /mm | |
parent | 09f586b35d8503b57de1e0e9b19bc6b38e0d7319 (diff) |
thp: prevent hugepages during args/env copying into the user stack
Transparent hugepages can only be created if rmap is fully
functional. So we must prevent hugepages from being created while
is_vma_temporary_stack() is true.
This also optimizes away some harmless but unnecessary setting of
khugepaged_scan.address and it switches some BUG_ON to VM_BUG_ON.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/huge_memory.c | 35 |
1 files changed, 16 insertions, 19 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e62ddb8f24b6..3e29781ee762 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1811,6 +1811,8 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
1811 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ | 1811 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ |
1812 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) | 1812 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) |
1813 | goto out; | 1813 | goto out; |
1814 | if (is_vma_temporary_stack(vma)) | ||
1815 | goto out; | ||
1814 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); | 1816 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); |
1815 | 1817 | ||
1816 | pgd = pgd_offset(mm, address); | 1818 | pgd = pgd_offset(mm, address); |
@@ -2032,32 +2034,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, | |||
2032 | if ((!(vma->vm_flags & VM_HUGEPAGE) && | 2034 | if ((!(vma->vm_flags & VM_HUGEPAGE) && |
2033 | !khugepaged_always()) || | 2035 | !khugepaged_always()) || |
2034 | (vma->vm_flags & VM_NOHUGEPAGE)) { | 2036 | (vma->vm_flags & VM_NOHUGEPAGE)) { |
2037 | skip: | ||
2035 | progress++; | 2038 | progress++; |
2036 | continue; | 2039 | continue; |
2037 | } | 2040 | } |
2038 | |||
2039 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ | 2041 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ |
2040 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) { | 2042 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) |
2041 | khugepaged_scan.address = vma->vm_end; | 2043 | goto skip; |
2042 | progress++; | 2044 | if (is_vma_temporary_stack(vma)) |
2043 | continue; | 2045 | goto skip; |
2044 | } | 2046 | |
2045 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); | 2047 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); |
2046 | 2048 | ||
2047 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; | 2049 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
2048 | hend = vma->vm_end & HPAGE_PMD_MASK; | 2050 | hend = vma->vm_end & HPAGE_PMD_MASK; |
2049 | if (hstart >= hend) { | 2051 | if (hstart >= hend) |
2050 | progress++; | 2052 | goto skip; |
2051 | continue; | 2053 | if (khugepaged_scan.address > hend) |
2052 | } | 2054 | goto skip; |
2053 | if (khugepaged_scan.address < hstart) | 2055 | if (khugepaged_scan.address < hstart) |
2054 | khugepaged_scan.address = hstart; | 2056 | khugepaged_scan.address = hstart; |
2055 | if (khugepaged_scan.address > hend) { | 2057 | VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); |
2056 | khugepaged_scan.address = hend + HPAGE_PMD_SIZE; | ||
2057 | progress++; | ||
2058 | continue; | ||
2059 | } | ||
2060 | BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); | ||
2061 | 2058 | ||
2062 | while (khugepaged_scan.address < hend) { | 2059 | while (khugepaged_scan.address < hend) { |
2063 | int ret; | 2060 | int ret; |
@@ -2086,7 +2083,7 @@ breakouterloop: | |||
2086 | breakouterloop_mmap_sem: | 2083 | breakouterloop_mmap_sem: |
2087 | 2084 | ||
2088 | spin_lock(&khugepaged_mm_lock); | 2085 | spin_lock(&khugepaged_mm_lock); |
2089 | BUG_ON(khugepaged_scan.mm_slot != mm_slot); | 2086 | VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); |
2090 | /* | 2087 | /* |
2091 | * Release the current mm_slot if this mm is about to die, or | 2088 | * Release the current mm_slot if this mm is about to die, or |
2092 | * if we scanned all vmas of this mm. | 2089 | * if we scanned all vmas of this mm. |
@@ -2241,9 +2238,9 @@ static int khugepaged(void *none) | |||
2241 | 2238 | ||
2242 | for (;;) { | 2239 | for (;;) { |
2243 | mutex_unlock(&khugepaged_mutex); | 2240 | mutex_unlock(&khugepaged_mutex); |
2244 | BUG_ON(khugepaged_thread != current); | 2241 | VM_BUG_ON(khugepaged_thread != current); |
2245 | khugepaged_loop(); | 2242 | khugepaged_loop(); |
2246 | BUG_ON(khugepaged_thread != current); | 2243 | VM_BUG_ON(khugepaged_thread != current); |
2247 | 2244 | ||
2248 | mutex_lock(&khugepaged_mutex); | 2245 | mutex_lock(&khugepaged_mutex); |
2249 | if (!khugepaged_enabled()) | 2246 | if (!khugepaged_enabled()) |