Diffstat (limited to 'mm')
 mm/hugetlb.c |  4 ++--
 mm/memory.c  | 14 +++++++-------
 mm/migrate.c |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e91b81b63670..94cd94df56e3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2088,7 +2088,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 
 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, ptep);
 	}
 }
 
@@ -2559,7 +2559,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = pte_mkyoung(entry);
 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
 						flags & FAULT_FLAG_WRITE))
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, ptep);
 
 out_page_table_lock:
 	spin_unlock(&mm->page_table_lock);
diff --git a/mm/memory.c b/mm/memory.c
index 09e4b1be7b67..72fb5f39bccc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1593,7 +1593,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	/* Ok, finally just insert the thing.. */
 	entry = pte_mkspecial(pfn_pte(pfn, prot));
 	set_pte_at(mm, addr, pte, entry);
-	update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
 	retval = 0;
 out_unlock:
@@ -2116,7 +2116,7 @@ reuse:
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		if (ptep_set_access_flags(vma, address, page_table, entry,1))
-			update_mmu_cache(vma, address, entry);
+			update_mmu_cache(vma, address, page_table);
 		ret |= VM_FAULT_WRITE;
 		goto unlock;
 	}
@@ -2185,7 +2185,7 @@ gotten:
 		 * new page to be mapped directly into the secondary page table.
 		 */
 		set_pte_at_notify(mm, address, page_table, entry);
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, page_table);
 		if (old_page) {
 			/*
 			 * Only after switching the pte to the new page may
@@ -2629,7 +2629,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, address, pte);
+	update_mmu_cache(vma, address, page_table);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 out:
@@ -2694,7 +2694,7 @@ setpte:
 	set_pte_at(mm, address, page_table, entry);
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, address, entry);
+	update_mmu_cache(vma, address, page_table);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
@@ -2855,7 +2855,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_pte_at(mm, address, page_table, entry);
 
 		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, page_table);
 	} else {
 		if (charged)
 			mem_cgroup_uncharge_page(page);
@@ -2992,7 +2992,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	}
 	entry = pte_mkyoung(entry);
 	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, pte);
 	} else {
 		/*
 		 * This is needed only for protection faults but the arch code
diff --git a/mm/migrate.c b/mm/migrate.c
index efddbf0926b2..e58e5da25b91 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -134,7 +134,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 		page_add_file_rmap(new);
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, addr, pte);
+	update_mmu_cache(vma, addr, ptep);
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
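
Every hunk above makes the same substitution: the last argument to update_mmu_cache() changes from the pte_t value that was just written (entry, or the local pte) to the pte_t pointer the caller already holds (ptep, page_table, pte). For context, a minimal sketch of what the new convention implies on the architecture side follows; the arch headers are outside this mm/-limited diffstat, so the no-op body below is an illustrative assumption, not part of this change.

/*
 * Illustrative sketch only -- not taken from this diff.  Architectures
 * with no software-managed TLB or virtually-tagged cache typically make
 * update_mmu_cache() a no-op; under the convention used by the call
 * sites above, the hook receives a pointer to the PTE, so an arch that
 * does need it can re-read the entry through *ptep.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *ptep)
{
	/* No per-PTE cache/TLB maintenance needed on this architecture. */
}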