 mm/khugepaged.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 79d55e10bca9..961cbe9062a5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -397,10 +397,11 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
 	return atomic_read(&mm->mm_users) == 0;
 }
 
-static bool hugepage_vma_check(struct vm_area_struct *vma)
+static bool hugepage_vma_check(struct vm_area_struct *vma,
+			       unsigned long vm_flags)
 {
-	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
-	    (vma->vm_flags & VM_NOHUGEPAGE) ||
+	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+	    (vm_flags & VM_NOHUGEPAGE) ||
 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
 		return false;
 	if (shmem_file(vma->vm_file)) {
@@ -413,7 +414,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
 		return false;
 	if (is_vma_temporary_stack(vma))
 		return false;
-	return !(vma->vm_flags & VM_NO_KHUGEPAGED);
+	return !(vm_flags & VM_NO_KHUGEPAGED);
 }
 
 int __khugepaged_enter(struct mm_struct *mm)
@@ -458,7 +459,7 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 	 * khugepaged does not yet work on non-shmem files or special
 	 * mappings. And file-private shmem THP is not supported.
 	 */
-	if (!hugepage_vma_check(vma))
+	if (!hugepage_vma_check(vma, vm_flags))
 		return 0;
 
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
@@ -861,7 +862,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
 		return SCAN_ADDRESS_RANGE;
-	if (!hugepage_vma_check(vma))
+	if (!hugepage_vma_check(vma, vma->vm_flags))
 		return SCAN_VMA_CHECK;
 	return 0;
 }
@@ -1695,7 +1696,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 			progress++;
 			break;
 		}
-		if (!hugepage_vma_check(vma)) {
+		if (!hugepage_vma_check(vma, vma->vm_flags)) {
 skip:
 			progress++;
 			continue;
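
The sketch below is illustrative only and is not part of the patch; the helper name and its parameters are hypothetical. It shows the calling convention the change establishes: callers that are in the middle of updating a VMA's flags, such as khugepaged_enter_vma_merge() on the madvise/vma_merge path, pass the prospective vm_flags before they are stored in the VMA, while the scan-side callers (hugepage_vma_revalidate(), khugepaged_scan_mm_slot()) keep passing the VMA's own vma->vm_flags, which is already current there.

/*
 * Hypothetical helper, for illustration only (not part of the patch).
 * hugepage_vma_check() now tests the vm_flags value it is handed rather
 * than always reading vma->vm_flags, so each caller picks the right one.
 */
static bool example_thp_eligible(struct vm_area_struct *vma,
				 unsigned long prospective_vm_flags,
				 bool flags_already_applied)
{
	if (flags_already_applied)
		/* scan path: the VMA's own flags are authoritative */
		return hugepage_vma_check(vma, vma->vm_flags);

	/* merge/madvise path: vma->vm_flags may still hold the old value */
	return hugepage_vma_check(vma, prospective_vm_flags);
}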