aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrea Arcangeli <aarcange@redhat.com>2011-02-15 13:02:45 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-02-15 18:21:11 -0500
commita7d6e4ecdb7648478ddec76d30d87d03d6e22b31 (patch)
tree1e0110780ac0a8eeef2629e1d5880602bd6003c0
parent09f586b35d8503b57de1e0e9b19bc6b38e0d7319 (diff)
thp: prevent hugepages during args/env copying into the user stack
Transparent hugepages can only be created if rmap is fully functional. So we must prevent hugepages from being created while is_vma_temporary_stack() is true. This also optimizes away some harmless but unnecessary setting of khugepaged_scan.address and it switches some BUG_ON to VM_BUG_ON. Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/huge_mm.h3
-rw-r--r--mm/huge_memory.c35
2 files changed, 18 insertions, 20 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8e6c8c42bc3c..df29c8fde36b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -57,7 +57,8 @@ extern pmd_t *page_check_address_pmd(struct page *page,
57 (transparent_hugepage_flags & \ 57 (transparent_hugepage_flags & \
58 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \ 58 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \
59 ((__vma)->vm_flags & VM_HUGEPAGE))) && \ 59 ((__vma)->vm_flags & VM_HUGEPAGE))) && \
60 !((__vma)->vm_flags & VM_NOHUGEPAGE)) 60 !((__vma)->vm_flags & VM_NOHUGEPAGE) && \
61 !is_vma_temporary_stack(__vma))
61#define transparent_hugepage_defrag(__vma) \ 62#define transparent_hugepage_defrag(__vma) \
62 ((transparent_hugepage_flags & \ 63 ((transparent_hugepage_flags & \
63 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ 64 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e62ddb8f24b6..3e29781ee762 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1811,6 +1811,8 @@ static void collapse_huge_page(struct mm_struct *mm,
1811 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ 1811 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1812 if (!vma->anon_vma || vma->vm_ops || vma->vm_file) 1812 if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
1813 goto out; 1813 goto out;
1814 if (is_vma_temporary_stack(vma))
1815 goto out;
1814 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); 1816 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1815 1817
1816 pgd = pgd_offset(mm, address); 1818 pgd = pgd_offset(mm, address);
@@ -2032,32 +2034,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2032 if ((!(vma->vm_flags & VM_HUGEPAGE) && 2034 if ((!(vma->vm_flags & VM_HUGEPAGE) &&
2033 !khugepaged_always()) || 2035 !khugepaged_always()) ||
2034 (vma->vm_flags & VM_NOHUGEPAGE)) { 2036 (vma->vm_flags & VM_NOHUGEPAGE)) {
2037 skip:
2035 progress++; 2038 progress++;
2036 continue; 2039 continue;
2037 } 2040 }
2038
2039 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ 2041 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
2040 if (!vma->anon_vma || vma->vm_ops || vma->vm_file) { 2042 if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
2041 khugepaged_scan.address = vma->vm_end; 2043 goto skip;
2042 progress++; 2044 if (is_vma_temporary_stack(vma))
2043 continue; 2045 goto skip;
2044 } 2046
2045 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); 2047 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
2046 2048
2047 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2049 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2048 hend = vma->vm_end & HPAGE_PMD_MASK; 2050 hend = vma->vm_end & HPAGE_PMD_MASK;
2049 if (hstart >= hend) { 2051 if (hstart >= hend)
2050 progress++; 2052 goto skip;
2051 continue; 2053 if (khugepaged_scan.address > hend)
2052 } 2054 goto skip;
2053 if (khugepaged_scan.address < hstart) 2055 if (khugepaged_scan.address < hstart)
2054 khugepaged_scan.address = hstart; 2056 khugepaged_scan.address = hstart;
2055 if (khugepaged_scan.address > hend) { 2057 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2056 khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
2057 progress++;
2058 continue;
2059 }
2060 BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2061 2058
2062 while (khugepaged_scan.address < hend) { 2059 while (khugepaged_scan.address < hend) {
2063 int ret; 2060 int ret;
@@ -2086,7 +2083,7 @@ breakouterloop:
2086breakouterloop_mmap_sem: 2083breakouterloop_mmap_sem:
2087 2084
2088 spin_lock(&khugepaged_mm_lock); 2085 spin_lock(&khugepaged_mm_lock);
2089 BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2086 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2090 /* 2087 /*
2091 * Release the current mm_slot if this mm is about to die, or 2088 * Release the current mm_slot if this mm is about to die, or
2092 * if we scanned all vmas of this mm. 2089 * if we scanned all vmas of this mm.
@@ -2241,9 +2238,9 @@ static int khugepaged(void *none)
2241 2238
2242 for (;;) { 2239 for (;;) {
2243 mutex_unlock(&khugepaged_mutex); 2240 mutex_unlock(&khugepaged_mutex);
2244 BUG_ON(khugepaged_thread != current); 2241 VM_BUG_ON(khugepaged_thread != current);
2245 khugepaged_loop(); 2242 khugepaged_loop();
2246 BUG_ON(khugepaged_thread != current); 2243 VM_BUG_ON(khugepaged_thread != current);
2247 2244
2248 mutex_lock(&khugepaged_mutex); 2245 mutex_lock(&khugepaged_mutex);
2249 if (!khugepaged_enabled()) 2246 if (!khugepaged_enabled())