path: root/mm/huge_memory.c
author		Jan Kara <jack@suse.cz>	2016-12-14 18:06:58 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 19:04:09 -0500
commit		82b0f8c39a3869b6fd2a10e180a862248736ec6f
tree		cc10f381647ad18a17b05020783991ed32ae4590	/mm/huge_memory.c
parent		8b7457ef9a9eb46cd1675d40d8e1fd3c47a38395
mm: join struct fault_env and vm_fault
Currently we have two different structures for passing fault information
around - struct vm_fault and struct fault_env. DAX will need more
information in struct vm_fault to handle its faults so the content of that
structure would become even closer to fault_env. Furthermore it would need
to generate struct fault_env to be able to call some of the generic
functions. So at this point I don't think there's much use in keeping these
two structures separate. Just embed into struct vm_fault all that is needed
to use it for both purposes.

Link: http://lkml.kernel.org/r/1479460644-25076-2-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
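For orientation, here is a minimal sketch of the consolidated structure, limited to the members the hunks below actually touch (vma, address, flags, pmd, pte, ptl). It is illustrative only, not the exact upstream definition: the real struct vm_fault carries further fields (for example pgoff and gfp_mask carried over from the old vm_fault).

/*
 * Sketch only: the subset of the merged struct vm_fault used by
 * mm/huge_memory.c in this patch. Field names match the diff below;
 * the full upstream definition has additional members.
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* target VMA (was fault_env.vma) */
	unsigned long address;		/* faulting virtual address */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pmd_t *pmd;			/* pmd entry covering 'address' */
	pte_t *pte;			/* pte entry, where applicable */
	spinlock_t *ptl;		/* page table lock for pmd/pte */
};

Callers that previously took a struct fault_env * (for example handle_userfault() in the hunks below) now receive this struct vm_fault * instead.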
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	173
1 file changed, 87 insertions(+), 86 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cee42cf05477..10eedbf14421 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -542,13 +542,13 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 
-static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
+static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 		gfp_t gfp)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	struct mem_cgroup *memcg;
 	pgtable_t pgtable;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
@@ -573,9 +573,9 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
 	 */
 	__SetPageUptodate(page);
 
-	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
-	if (unlikely(!pmd_none(*fe->pmd))) {
-		spin_unlock(fe->ptl);
+	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_none(*vmf->pmd))) {
+		spin_unlock(vmf->ptl);
 		mem_cgroup_cancel_charge(page, memcg, true);
 		put_page(page);
 		pte_free(vma->vm_mm, pgtable);
@@ -586,11 +586,11 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
 		if (userfaultfd_missing(vma)) {
 			int ret;
 
-			spin_unlock(fe->ptl);
+			spin_unlock(vmf->ptl);
 			mem_cgroup_cancel_charge(page, memcg, true);
 			put_page(page);
 			pte_free(vma->vm_mm, pgtable);
-			ret = handle_userfault(fe, VM_UFFD_MISSING);
+			ret = handle_userfault(vmf, VM_UFFD_MISSING);
 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
 			return ret;
 		}
@@ -600,11 +600,11 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
 		page_add_new_anon_rmap(page, vma, haddr, true);
 		mem_cgroup_commit_charge(page, memcg, false, true);
 		lru_cache_add_active_or_unevictable(page, vma);
-		pgtable_trans_huge_deposit(vma->vm_mm, fe->pmd, pgtable);
-		set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry);
+		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		atomic_long_inc(&vma->vm_mm->nr_ptes);
-		spin_unlock(fe->ptl);
+		spin_unlock(vmf->ptl);
 		count_vm_event(THP_FAULT_ALLOC);
 	}
 
@@ -651,12 +651,12 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 	return true;
 }
 
-int do_huge_pmd_anonymous_page(struct fault_env *fe)
+int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	gfp_t gfp;
 	struct page *page;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 
 	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
 		return VM_FAULT_FALLBACK;
@@ -664,7 +664,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
 		return VM_FAULT_OOM;
-	if (!(fe->flags & FAULT_FLAG_WRITE) &&
+	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
 			!mm_forbids_zeropage(vma->vm_mm) &&
 			transparent_hugepage_use_zero_page()) {
 		pgtable_t pgtable;
@@ -680,22 +680,22 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
 			count_vm_event(THP_FAULT_FALLBACK);
 			return VM_FAULT_FALLBACK;
 		}
-		fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
+		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 		ret = 0;
 		set = false;
-		if (pmd_none(*fe->pmd)) {
+		if (pmd_none(*vmf->pmd)) {
 			if (userfaultfd_missing(vma)) {
-				spin_unlock(fe->ptl);
-				ret = handle_userfault(fe, VM_UFFD_MISSING);
+				spin_unlock(vmf->ptl);
+				ret = handle_userfault(vmf, VM_UFFD_MISSING);
 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
 			} else {
 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
-						haddr, fe->pmd, zero_page);
-				spin_unlock(fe->ptl);
+						haddr, vmf->pmd, zero_page);
+				spin_unlock(vmf->ptl);
 				set = true;
 			}
 		} else
-			spin_unlock(fe->ptl);
+			spin_unlock(vmf->ptl);
 		if (!set)
 			pte_free(vma->vm_mm, pgtable);
 		return ret;
@@ -707,7 +707,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
 		return VM_FAULT_FALLBACK;
 	}
 	prep_transhuge_page(page);
-	return __do_huge_pmd_anonymous_page(fe, page, gfp);
+	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
 }
 
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -879,30 +879,30 @@ out:
 	return ret;
 }
 
-void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd)
+void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
 {
 	pmd_t entry;
 	unsigned long haddr;
 
-	fe->ptl = pmd_lock(fe->vma->vm_mm, fe->pmd);
-	if (unlikely(!pmd_same(*fe->pmd, orig_pmd)))
+	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
 		goto unlock;
 
 	entry = pmd_mkyoung(orig_pmd);
-	haddr = fe->address & HPAGE_PMD_MASK;
-	if (pmdp_set_access_flags(fe->vma, haddr, fe->pmd, entry,
-				fe->flags & FAULT_FLAG_WRITE))
-		update_mmu_cache_pmd(fe->vma, fe->address, fe->pmd);
+	haddr = vmf->address & HPAGE_PMD_MASK;
+	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry,
+				vmf->flags & FAULT_FLAG_WRITE))
+		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
 
 unlock:
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 }
 
-static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
+static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
 		struct page *page)
 {
-	struct vm_area_struct *vma = fe->vma;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	struct vm_area_struct *vma = vmf->vma;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	struct mem_cgroup *memcg;
 	pgtable_t pgtable;
 	pmd_t _pmd;
@@ -921,7 +921,7 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
 		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
 					__GFP_OTHER_NODE, vma,
-					fe->address, page_to_nid(page));
+					vmf->address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
 			     mem_cgroup_try_charge(pages[i], vma->vm_mm,
 				     GFP_KERNEL, &memcg, false))) {
@@ -952,15 +952,15 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
 	mmun_end = haddr + HPAGE_PMD_SIZE;
 	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
 
-	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
-	if (unlikely(!pmd_same(*fe->pmd, orig_pmd)))
+	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
 		goto out_free_pages;
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 
-	pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd);
+	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
 	/* leave pmd empty until pte is filled */
 
-	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, fe->pmd);
+	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
 	pmd_populate(vma->vm_mm, &_pmd, pgtable);
 
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -969,20 +969,20 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		memcg = (void *)page_private(pages[i]);
 		set_page_private(pages[i], 0);
-		page_add_new_anon_rmap(pages[i], fe->vma, haddr, false);
+		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
 		mem_cgroup_commit_charge(pages[i], memcg, false, false);
 		lru_cache_add_active_or_unevictable(pages[i], vma);
-		fe->pte = pte_offset_map(&_pmd, haddr);
-		VM_BUG_ON(!pte_none(*fe->pte));
-		set_pte_at(vma->vm_mm, haddr, fe->pte, entry);
-		pte_unmap(fe->pte);
+		vmf->pte = pte_offset_map(&_pmd, haddr);
+		VM_BUG_ON(!pte_none(*vmf->pte));
+		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
+		pte_unmap(vmf->pte);
 	}
 	kfree(pages);
 
 	smp_wmb(); /* make pte visible before pmd */
-	pmd_populate(vma->vm_mm, fe->pmd, pgtable);
+	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
 	page_remove_rmap(page, true);
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 
@@ -993,7 +993,7 @@ out:
 	return ret;
 
 out_free_pages:
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
 		memcg = (void *)page_private(pages[i]);
@@ -1005,23 +1005,23 @@ out_free_pages:
 	goto out;
 }
 
-int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
+int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	struct page *page = NULL, *new_page;
 	struct mem_cgroup *memcg;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
 	gfp_t huge_gfp;			/* for allocation and charge */
 	int ret = 0;
 
-	fe->ptl = pmd_lockptr(vma->vm_mm, fe->pmd);
+	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
 	if (is_huge_zero_pmd(orig_pmd))
 		goto alloc;
-	spin_lock(fe->ptl);
-	if (unlikely(!pmd_same(*fe->pmd, orig_pmd)))
+	spin_lock(vmf->ptl);
+	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
 		goto out_unlock;
 
 	page = pmd_page(orig_pmd);
@@ -1034,13 +1034,13 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
 		pmd_t entry;
 		entry = pmd_mkyoung(orig_pmd);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		if (pmdp_set_access_flags(vma, haddr, fe->pmd, entry, 1))
-			update_mmu_cache_pmd(vma, fe->address, fe->pmd);
+		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
+			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 		ret |= VM_FAULT_WRITE;
 		goto out_unlock;
 	}
 	get_page(page);
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 alloc:
 	if (transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow()) {
@@ -1053,12 +1053,12 @@ alloc:
 		prep_transhuge_page(new_page);
 	} else {
 		if (!page) {
-			split_huge_pmd(vma, fe->pmd, fe->address);
+			split_huge_pmd(vma, vmf->pmd, vmf->address);
 			ret |= VM_FAULT_FALLBACK;
 		} else {
-			ret = do_huge_pmd_wp_page_fallback(fe, orig_pmd, page);
+			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
 			if (ret & VM_FAULT_OOM) {
-				split_huge_pmd(vma, fe->pmd, fe->address);
+				split_huge_pmd(vma, vmf->pmd, vmf->address);
 				ret |= VM_FAULT_FALLBACK;
 			}
 			put_page(page);
@@ -1070,7 +1070,7 @@ alloc:
 	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
 					huge_gfp, &memcg, true))) {
 		put_page(new_page);
-		split_huge_pmd(vma, fe->pmd, fe->address);
+		split_huge_pmd(vma, vmf->pmd, vmf->address);
 		if (page)
 			put_page(page);
 		ret |= VM_FAULT_FALLBACK;
@@ -1090,11 +1090,11 @@ alloc:
 	mmun_end = haddr + HPAGE_PMD_SIZE;
 	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
 
-	spin_lock(fe->ptl);
+	spin_lock(vmf->ptl);
 	if (page)
 		put_page(page);
-	if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) {
-		spin_unlock(fe->ptl);
+	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
+		spin_unlock(vmf->ptl);
 		mem_cgroup_cancel_charge(new_page, memcg, true);
 		put_page(new_page);
 		goto out_mn;
@@ -1102,12 +1102,12 @@ alloc:
 		pmd_t entry;
 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd);
+		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr, true);
 		mem_cgroup_commit_charge(new_page, memcg, false, true);
 		lru_cache_add_active_or_unevictable(new_page, vma);
-		set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry);
-		update_mmu_cache_pmd(vma, fe->address, fe->pmd);
+		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
+		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 		if (!page) {
 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		} else {
@@ -1117,13 +1117,13 @@ alloc:
 		}
 		ret |= VM_FAULT_WRITE;
 	}
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 out_mn:
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 out:
 	return ret;
 out_unlock:
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 	return ret;
 }
 
@@ -1196,12 +1196,12 @@ out:
 }
 
 /* NUMA hinting page fault entry point for trans huge pmds */
-int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
+int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	struct anon_vma *anon_vma = NULL;
 	struct page *page;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	int page_nid = -1, this_nid = numa_node_id();
 	int target_nid, last_cpupid = -1;
 	bool page_locked;
@@ -1209,8 +1209,8 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 	bool was_writable;
 	int flags = 0;
 
-	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
-	if (unlikely(!pmd_same(pmd, *fe->pmd)))
+	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
 		goto out_unlock;
 
 	/*
@@ -1218,9 +1218,9 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 	 * without disrupting NUMA hinting information. Do not relock and
 	 * check_same as the page may no longer be mapped.
 	 */
-	if (unlikely(pmd_trans_migrating(*fe->pmd))) {
-		page = pmd_page(*fe->pmd);
-		spin_unlock(fe->ptl);
+	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
+		page = pmd_page(*vmf->pmd);
+		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
 		goto out;
 	}
@@ -1253,7 +1253,7 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 
 	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
-		spin_unlock(fe->ptl);
+		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
 		page_nid = -1;
 		goto out;
@@ -1264,12 +1264,12 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 	 * to serialises splits
 	 */
 	get_page(page);
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 	anon_vma = page_lock_anon_vma_read(page);
 
 	/* Confirm the PMD did not change while page_table_lock was released */
-	spin_lock(fe->ptl);
-	if (unlikely(!pmd_same(pmd, *fe->pmd))) {
+	spin_lock(vmf->ptl);
+	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
 		unlock_page(page);
 		put_page(page);
 		page_nid = -1;
@@ -1287,9 +1287,9 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 	 * Migrate the THP to the requested node, returns with page unlocked
 	 * and access rights restored.
 	 */
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
-				fe->pmd, pmd, fe->address, page, target_nid);
+				vmf->pmd, pmd, vmf->address, page, target_nid);
 	if (migrated) {
 		flags |= TNF_MIGRATED;
 		page_nid = target_nid;
@@ -1304,18 +1304,19 @@ clear_pmdnuma:
 	pmd = pmd_mkyoung(pmd);
 	if (was_writable)
 		pmd = pmd_mkwrite(pmd);
-	set_pmd_at(vma->vm_mm, haddr, fe->pmd, pmd);
-	update_mmu_cache_pmd(vma, fe->address, fe->pmd);
+	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
+	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 	unlock_page(page);
 out_unlock:
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 
 out:
 	if (anon_vma)
 		page_unlock_anon_vma_read(anon_vma);
 
 	if (page_nid != -1)
-		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, fe->flags);
+		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
+				vmf->flags);
 
 	return 0;
 }