Diffstat (limited to 'mm/memory.c')
 -rw-r--r--  mm/memory.c | 38
 1 file changed, 31 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 262e3eb6601a..67f0ab9077d9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -51,6 +51,7 @@
 #include <linux/init.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -374,7 +375,8 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
  *
  * The calling function must still handle the error.
  */
-void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
+static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
+			  unsigned long vaddr)
 {
 	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
 			"vm_flags = %lx, vaddr = %lx\n",
@@ -651,6 +653,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	unsigned long next;
 	unsigned long addr = vma->vm_start;
 	unsigned long end = vma->vm_end;
+	int ret;
 
 	/*
 	 * Don't copy ptes where a page fault will fill them correctly.
@@ -666,17 +669,33 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	/*
+	 * We need to invalidate the secondary MMU mappings only when
+	 * there could be a permission downgrade on the ptes of the
+	 * parent mm. And a permission downgrade will only happen if
+	 * is_cow_mapping() returns true.
+	 */
+	if (is_cow_mapping(vma->vm_flags))
+		mmu_notifier_invalidate_range_start(src_mm, addr, end);
+
+	ret = 0;
 	dst_pgd = pgd_offset(dst_mm, addr);
 	src_pgd = pgd_offset(src_mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(src_pgd))
 			continue;
-		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
-						vma, addr, next))
-			return -ENOMEM;
+		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
+					    vma, addr, next))) {
+			ret = -ENOMEM;
+			break;
+		}
 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
-	return 0;
+
+	if (is_cow_mapping(vma->vm_flags))
+		mmu_notifier_invalidate_range_end(src_mm,
+						  vma->vm_start, end);
+	return ret;
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
@@ -880,7 +899,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 	unsigned long start = start_addr;
 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
 	int fullmm = (*tlbp)->fullmm;
+	struct mm_struct *mm = vma->vm_mm;
 
+	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
 		unsigned long end;
 
@@ -945,6 +966,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		}
 	}
 out:
+	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 	return start;	/* which is now the end (or restart) address */
 }
 
@@ -1615,10 +1637,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long end = addr + size;
+	unsigned long start = addr, end = addr + size;
 	int err;
 
 	BUG_ON(addr >= end);
+	mmu_notifier_invalidate_range_start(mm, start, end);
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -1626,6 +1649,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+	mmu_notifier_invalidate_range_end(mm, start, end);
 	return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
@@ -1838,7 +1862,7 @@ gotten:
 	 * seen in the presence of one thread doing SMC and another
 	 * thread doing COW.
 	 */
-	ptep_clear_flush(vma, address, page_table);
+	ptep_clear_flush_notify(vma, address, page_table);
 	set_pte_at(mm, address, page_table, entry);
 	update_mmu_cache(vma, address, entry);
 	lru_cache_add_active(new_page);
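
For context on how the hooks added above get consumed: a driver that shadows the primary page tables (a secondary MMU such as KVM or the GRU) registers an mmu_notifier and receives the invalidate_range_start/end pair around every bulk pte update. Below is a minimal consumer-side sketch against the API this series introduces; it is not part of the patch, and every my_* identifier is invented for illustration.

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

struct my_shadow_mmu {
	struct mmu_notifier mn;
	/* driver-private shadow page-table state would hang off here */
};

static void my_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end)
{
	/*
	 * Called before the primary ptes in [start, end) change, e.g.
	 * before fork() write-protects a COW range in copy_page_range().
	 * Tear down (or write-protect) the matching shadow mappings and
	 * hold off new shadow faults until the _end callback.
	 */
}

static void my_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	/* The primary ptes are stable again; shadow faults may refill. */
}

static const struct mmu_notifier_ops my_mmu_ops = {
	.invalidate_range_start	= my_invalidate_range_start,
	.invalidate_range_end	= my_invalidate_range_end,
};

static int my_shadow_attach(struct my_shadow_mmu *s, struct mm_struct *mm)
{
	s->mn.ops = &my_mmu_ops;
	/* mmu_notifier_register() takes mm->mmap_sem internally. */
	return mmu_notifier_register(&s->mn, mm);
}

Note the asymmetry in the diff itself: the bulk paths (copy_page_range(), unmap_vmas(), apply_to_page_range()) use the ranged start/end pair, while the single-pte COW break in do_wp_page() switches ptep_clear_flush() to ptep_clear_flush_notify(), which folds the notification into the pte clear.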