Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	14
1 file changed, 7 insertions, 7 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 09e4b1be7b67..72fb5f39bccc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1593,7 +1593,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	/* Ok, finally just insert the thing.. */
 	entry = pte_mkspecial(pfn_pte(pfn, prot));
 	set_pte_at(mm, addr, pte, entry);
-	update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
 	retval = 0;
 out_unlock:
@@ -2116,7 +2116,7 @@ reuse:
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		if (ptep_set_access_flags(vma, address, page_table, entry,1))
-			update_mmu_cache(vma, address, entry);
+			update_mmu_cache(vma, address, page_table);
 		ret |= VM_FAULT_WRITE;
 		goto unlock;
 	}
@@ -2185,7 +2185,7 @@ gotten:
 		 * new page to be mapped directly into the secondary page table.
 		 */
 		set_pte_at_notify(mm, address, page_table, entry);
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, page_table);
 		if (old_page) {
 			/*
 			 * Only after switching the pte to the new page may
@@ -2629,7 +2629,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, address, pte);
+	update_mmu_cache(vma, address, page_table);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 out:
@@ -2694,7 +2694,7 @@ setpte:
 	set_pte_at(mm, address, page_table, entry);
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, address, entry);
+	update_mmu_cache(vma, address, page_table);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
@@ -2855,7 +2855,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_pte_at(mm, address, page_table, entry);
 
 		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, page_table);
 	} else {
 		if (charged)
 			mem_cgroup_uncharge_page(page);
@@ -2992,7 +2992,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	}
 	entry = pte_mkyoung(entry);
 	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
-		update_mmu_cache(vma, address, entry);
+		update_mmu_cache(vma, address, pte);
 	} else {
 		/*
 		 * This is needed only for protection faults but the arch code
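
Note: every hunk above makes the same change: update_mmu_cache() is now handed the PTE's location (a pte_t * such as page_table or pte) rather than the PTE value (entry). As a minimal sketch of what the per-architecture hook looks like under the new convention, consider the following; the stand-in types and the empty body are illustrative assumptions, not code from this commit or from any particular architecture:

/*
 * Illustrative stand-ins so the sketch is self-contained; in the kernel
 * these come from <linux/mm_types.h> and <asm/pgtable.h>.
 */
typedef struct { unsigned long pte; } pte_t;
struct vm_area_struct;

/*
 * Hypothetical architecture hook under the new calling convention: the
 * caller passes a pointer to the just-written PTE.  The value is still
 * available by dereferencing ptep, and an architecture that needs the
 * entry's address (for example to prime a hardware walker or to flush
 * the cache line holding the entry) now has that address as well.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *ptep)
{
	pte_t entry = *ptep;	/* old behaviour: consume the value */

	(void)vma;
	(void)address;
	(void)entry;
	/* arch-specific TLB/cache priming would go here */
}

Callers in mm/memory.c that still need the value (the ptep_set_access_flags() paths above, for instance) keep it in a local entry; only the argument passed to the hook changes.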