Diffstat (limited to 'mm/huge_memory.c')
 mm/huge_memory.c | 36 +++++++++++++-----------------------
 1 file changed, 13 insertions(+), 23 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86fe697e8bfb..0e7ded98d114 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd)
+		pmd_t *pmd, int flags)
 {
 	pmd_t _pmd;
 
-	/*
-	 * We should set the dirty bit only for FOLL_WRITE but for now
-	 * the dirty bit in the pmd is meaningless.  And if the dirty
-	 * bit will become meaningful and we'll only set it with
-	 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
-	 * set the young bit, instead of the current set_pmd_at.
-	 */
-	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+	_pmd = pmd_mkyoung(*pmd);
+	if (flags & FOLL_WRITE)
+		_pmd = pmd_mkdirty(_pmd);
 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-				pmd, _pmd, 1))
+				pmd, _pmd, flags & FOLL_WRITE))
 		update_mmu_cache_pmd(vma, addr, pmd);
 }
 
@@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 
 	/*
 	 * device mapped pages can only be returned if the
@@ -995,20 +990,15 @@ out:
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
-		pud_t *pud)
+		pud_t *pud, int flags)
 {
 	pud_t _pud;
 
-	/*
-	 * We should set the dirty bit only for FOLL_WRITE but for now
-	 * the dirty bit in the pud is meaningless.  And if the dirty
-	 * bit will become meaningful and we'll only set it with
-	 * FOLL_WRITE, an atomic set_bit will be required on the pud to
-	 * set the young bit, instead of the current set_pud_at.
-	 */
-	_pud = pud_mkyoung(pud_mkdirty(*pud));
+	_pud = pud_mkyoung(*pud);
+	if (flags & FOLL_WRITE)
+		_pud = pud_mkdirty(_pud);
 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
-				pud, _pud, 1))
+				pud, _pud, flags & FOLL_WRITE))
 		update_mmu_cache_pud(vma, addr, pud);
 }
 
@@ -1031,7 +1021,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 
 	if (flags & FOLL_TOUCH)
-		touch_pud(vma, addr, pud);
+		touch_pud(vma, addr, pud, flags);
 
 	/*
 	 * device mapped pages can only be returned if the
@@ -1424,7 +1414,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	page = pmd_page(*pmd);
 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * We don't mlock() pte-mapped THPs. This way we can avoid
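
For readability, this is touch_pmd() as it reads with the patch applied, assembled from the hunks above (touch_pud() follows the identical pattern at the PUD level). The dirty bit is now set only for FOLL_WRITE accesses, and the write/dirty decision is also passed through as the last argument of pmdp_set_access_flags():

/* Sketch of the post-patch touch_pmd(), assembled from the hunks above. */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	/* A FOLL_TOUCH lookup always marks the entry accessed (young). */
	_pmd = pmd_mkyoung(*pmd);
	/* Mark it dirty only when the caller is actually writing. */
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);
	/* The final argument tells the arch code whether this is a write. */
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}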
