diff options
| -rw-r--r-- | mm/memory.c | 27 |
1 file changed, 26 insertions, 1 deletion
diff --git a/mm/memory.c b/mm/memory.c index 423e0e7c2f73..d14b251a25a6 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -1785,7 +1785,6 @@ gotten: | |||
| 1785 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); | 1785 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 1786 | if (likely(pte_same(*page_table, orig_pte))) { | 1786 | if (likely(pte_same(*page_table, orig_pte))) { |
| 1787 | if (old_page) { | 1787 | if (old_page) { |
| 1788 | page_remove_rmap(old_page, vma); | ||
| 1789 | if (!PageAnon(old_page)) { | 1788 | if (!PageAnon(old_page)) { |
| 1790 | dec_mm_counter(mm, file_rss); | 1789 | dec_mm_counter(mm, file_rss); |
| 1791 | inc_mm_counter(mm, anon_rss); | 1790 | inc_mm_counter(mm, anon_rss); |
| @@ -1807,6 +1806,32 @@ gotten: | |||
| 1807 | lru_cache_add_active(new_page); | 1806 | lru_cache_add_active(new_page); |
| 1808 | page_add_new_anon_rmap(new_page, vma, address); | 1807 | page_add_new_anon_rmap(new_page, vma, address); |
| 1809 | 1808 | ||
| 1809 | if (old_page) { | ||
| 1810 | /* | ||
| 1811 | * Only after switching the pte to the new page may | ||
| 1812 | * we remove the mapcount here. Otherwise another | ||
| 1813 | * process may come and find the rmap count decremented | ||
| 1814 | * before the pte is switched to the new page, and | ||
| 1815 | * "reuse" the old page writing into it while our pte | ||
| 1816 | * here still points into it and can be read by other | ||
| 1817 | * threads. | ||
| 1818 | * | ||
| 1819 | * The critical issue is to order this | ||
| 1820 | * page_remove_rmap with the ptep_clear_flush above. | ||
| 1821 | * Those stores are ordered by (if nothing else,) | ||
| 1822 | * the barrier present in the atomic_add_negative | ||
| 1823 | * in page_remove_rmap. | ||
| 1824 | * | ||
| 1825 | * Then the TLB flush in ptep_clear_flush ensures that | ||
| 1826 | * no process can access the old page before the | ||
| 1827 | * decremented mapcount is visible. And the old page | ||
| 1828 | * cannot be reused until after the decremented | ||
| 1829 | * mapcount is visible. So transitively, TLBs to | ||
| 1830 | * old page will be flushed before it can be reused. | ||
| 1831 | */ | ||
| 1832 | page_remove_rmap(old_page, vma); | ||
| 1833 | } | ||
| 1834 | |||
| 1810 | /* Free the old page.. */ | 1835 | /* Free the old page.. */ |
| 1811 | new_page = old_page; | 1836 | new_page = old_page; |
| 1812 | ret |= VM_FAULT_WRITE; | 1837 | ret |= VM_FAULT_WRITE; |
