about summary refs log tree commit diff stats
path: root/mm/memory.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index b3c42f0f65c2..bd16dcaeefb8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1611,10 +1611,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		if (ptep_set_access_flags(vma, address, page_table, entry,1)) {
+		if (ptep_set_access_flags(vma, address, page_table, entry,1))
 			update_mmu_cache(vma, address, entry);
-			lazy_mmu_prot_update(entry);
-		}
 		ret |= VM_FAULT_WRITE;
 		goto unlock;
 	}
@@ -1650,7 +1648,6 @@ gotten:
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		lazy_mmu_prot_update(entry);
 		/*
 		 * Clear the pte entry and flush it first, before updating the
 		 * pte with the new entry. This will avoid a race condition
@@ -2180,7 +2177,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, entry);
-	lazy_mmu_prot_update(entry);
unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
@@ -2333,7 +2329,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
 	} else {
 		if (anon)
 			page_cache_release(page);
@@ -2505,7 +2500,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	entry = pte_mkyoung(entry);
 	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
 		update_mmu_cache(vma, address, entry);
-		lazy_mmu_prot_update(entry);
 	} else {
 		/*
 		 * This is needed only for protection faults but the arch code