Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  35
1 file changed, 29 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a8ca04faaea6..67f0ab9077d9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -51,6 +51,7 @@
 #include <linux/init.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -652,6 +653,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	unsigned long next;
 	unsigned long addr = vma->vm_start;
 	unsigned long end = vma->vm_end;
+	int ret;
 
 	/*
 	 * Don't copy ptes where a page fault will fill them correctly.
@@ -667,17 +669,33 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	/*
+	 * We need to invalidate the secondary MMU mappings only when
+	 * there could be a permission downgrade on the ptes of the
+	 * parent mm. And a permission downgrade will only happen if
+	 * is_cow_mapping() returns true.
+	 */
+	if (is_cow_mapping(vma->vm_flags))
+		mmu_notifier_invalidate_range_start(src_mm, addr, end);
+
+	ret = 0;
 	dst_pgd = pgd_offset(dst_mm, addr);
 	src_pgd = pgd_offset(src_mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(src_pgd))
 			continue;
-		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
-						vma, addr, next))
-			return -ENOMEM;
+		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
+					    vma, addr, next))) {
+			ret = -ENOMEM;
+			break;
+		}
 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
-	return 0;
+
+	if (is_cow_mapping(vma->vm_flags))
+		mmu_notifier_invalidate_range_end(src_mm,
+						  vma->vm_start, end);
+	return ret;
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
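The comment added above states the invariant this hunk enforces: a secondary MMU only needs to hear about fork() when the parent's PTEs may be downgraded (write-protected for COW), which is exactly when is_cow_mapping() is true. For context, here is a minimal sketch of the consumer side of this interface. The my_dev_* driver structure and its my_dev_flush_shadow() helper are hypothetical; struct mmu_notifier_ops, mmu_notifier_register() and the invalidate_range_start/end callbacks are the API this patch series introduces.

	#include <linux/kernel.h>
	#include <linux/mmu_notifier.h>
	#include <linux/mm_types.h>

	/* Hypothetical per-device state embedding the notifier. */
	struct my_dev_mmu {
		struct mmu_notifier mn;
		/* ... device page tables / shadow TLB state ... */
	};

	/* Hypothetical helper: tear down device mappings of [start, end). */
	static void my_dev_flush_shadow(struct my_dev_mmu *dev,
					unsigned long start, unsigned long end)
	{
		/* device-specific invalidation would go here */
	}

	static void my_dev_invalidate_range_start(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
	{
		struct my_dev_mmu *dev = container_of(mn, struct my_dev_mmu, mn);

		/*
		 * Drop the device's view of [start, end) *before* the primary
		 * PTEs are write-protected, so the device cannot keep writing
		 * through a stale writable mapping while fork() runs.
		 */
		my_dev_flush_shadow(dev, start, end);
	}

	static void my_dev_invalidate_range_end(struct mmu_notifier *mn,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
	{
		/* Range is consistent again; new device faults may refill it. */
	}

	static const struct mmu_notifier_ops my_dev_mmu_ops = {
		.invalidate_range_start	= my_dev_invalidate_range_start,
		.invalidate_range_end	= my_dev_invalidate_range_end,
	};

	/* Registration, e.g. from the driver's open(); mm is typically current->mm. */
	static int my_dev_register(struct my_dev_mmu *dev, struct mm_struct *mm)
	{
		dev->mn.ops = &my_dev_mmu_ops;
		return mmu_notifier_register(&dev->mn, mm);
	}
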
@@ -881,7 +899,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 	unsigned long start = start_addr;
 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
 	int fullmm = (*tlbp)->fullmm;
+	struct mm_struct *mm = vma->vm_mm;
 
+	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
 		unsigned long end;
 
@@ -946,6 +966,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		}
 	}
 out:
+	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 	return start;	/* which is now the end (or restart) address */
 }
 
@@ -1616,10 +1637,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long end = addr + size;
+	unsigned long start = addr, end = addr + size;
 	int err;
 
 	BUG_ON(addr >= end);
+	mmu_notifier_invalidate_range_start(mm, start, end);
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -1627,6 +1649,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+	mmu_notifier_invalidate_range_end(mm, start, end);
 	return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
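Because apply_to_page_range() hands a caller-supplied function a live PTE to rewrite, the hunk above brackets the whole walk in the notifier calls, so individual callers need no notifier awareness of their own. A hedged usage sketch follows, assuming the pte_fn_t callback signature of this era (int (*)(pte_t *, pgtable_t, unsigned long, void *)); the callback itself is illustrative only.

	#include <linux/mm.h>

	/*
	 * Illustrative pte_fn_t callback: clear the dirty bit on each present
	 * PTE. 'data' carries the mm. (A real caller would also flush the TLB
	 * for the range afterwards.)
	 */
	static int clear_dirty_pte(pte_t *pte, pgtable_t token,
				   unsigned long addr, void *data)
	{
		struct mm_struct *mm = data;
		pte_t entry = *pte;

		if (pte_present(entry))
			set_pte_at(mm, addr, pte, pte_mkclean(entry));
		return 0;
	}

	static int clear_dirty_range(struct mm_struct *mm,
				     unsigned long addr, unsigned long size)
	{
		/* Secondary-MMU invalidation is now handled inside the walk. */
		return apply_to_page_range(mm, addr, size, clear_dirty_pte, mm);
	}
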
@@ -1839,7 +1862,7 @@ gotten:
 	 * seen in the presence of one thread doing SMC and another
 	 * thread doing COW.
 	 */
-	ptep_clear_flush(vma, address, page_table);
+	ptep_clear_flush_notify(vma, address, page_table);
 	set_pte_at(mm, address, page_table, entry);
 	update_mmu_cache(vma, address, entry);
 	lru_cache_add_active(new_page);
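The ptep_clear_flush_notify() used here is a thin wrapper added by the same series: it performs the usual ptep_clear_flush() and then calls mmu_notifier_invalidate_page(), so any secondary MMU drops its stale translation before the new COW page is wired in. Paraphrasing its definition from include/linux/mmu_notifier.h in this series:

	#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
	({									\
		pte_t __pte;							\
		struct vm_area_struct *___vma = __vma;				\
		unsigned long ___address = __address;				\
		__pte = ptep_clear_flush(___vma, ___address, __ptep);		\
		mmu_notifier_invalidate_page(___vma->vm_mm, ___address);	\
		__pte;								\
	})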