author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2012-12-12 16:50:51 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-12 20:38:31 -0500
commit	fc9fe822f7112db23e51e2be3b886f5d8f0afdb6 (patch)
tree	5578922a977e964116767ffaee84533836a32128 /mm
parent	479f0abbfd253d1117a35c1df12755d27a2a0705 (diff)
thp: copy_huge_pmd(): copy huge zero page
It's easy to copy huge zero page. Just set destination pmd to huge zero
page. It's safe to copy huge zero page since we have none yet :-p

[rientjes@google.com: fix comment]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	22
1 file changed, 22 insertions, 0 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1ee34ddb46ad..650625390f61 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -708,6 +708,18 @@ static inline struct page *alloc_hugepage(int defrag)
 }
 #endif
 
+static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd)
+{
+	pmd_t entry;
+	entry = pfn_pmd(huge_zero_pfn, vma->vm_page_prot);
+	entry = pmd_wrprotect(entry);
+	entry = pmd_mkhuge(entry);
+	set_pmd_at(mm, haddr, pmd, entry);
+	pgtable_trans_huge_deposit(mm, pgtable);
+	mm->nr_ptes++;
+}
+
 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd,
 		unsigned int flags)
@@ -785,6 +797,16 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		pte_free(dst_mm, pgtable);
 		goto out_unlock;
 	}
+	/*
+	 * mm->page_table_lock is enough to be sure that huge zero pmd is not
+	 * under splitting since we don't split the page itself, only pmd to
+	 * a page table.
+	 */
+	if (is_huge_zero_pmd(pmd)) {
+		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd);
+		ret = 0;
+		goto out_unlock;
+	}
 	if (unlikely(pmd_trans_splitting(pmd))) {
 		/* split huge page running from under us */
 		spin_unlock(&src_mm->page_table_lock);
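Note: the is_huge_zero_pmd() check and the huge_zero_pfn variable used above come from
earlier patches in this series and are not part of this diff. As a rough sketch of what
that helper amounts to (the exact names and placement are assumptions based on the rest
of the series, not the committed code):

/* Sketch only: pfn of the shared huge zero page, set up earlier in the series. */
static unsigned long huge_zero_pfn __read_mostly;

/* A pmd maps the huge zero page iff its pfn matches huge_zero_pfn. */
static inline bool is_huge_zero_pfn(unsigned long pfn)
{
	return pfn == huge_zero_pfn;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_pfn(pmd_pfn(pmd));
}

Because set_huge_zero_page() installs the entry with pmd_wrprotect(), a later write to the
copied range faults instead of writing to the shared zero page, which is what makes this
fast path in copy_huge_pmd() safe on fork().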