author	Bob Liu <lliubbo@gmail.com>	2012-12-11 19:00:41 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 20:22:22 -0500
commit	b3092b3b734f146d96ca023a75cacf78078f96d5 (patch)
tree	56dcf17fe72ff95b5769f76265684d427a389b96
parent	fa475e517adb422cb3492e636195f9b2c0d009c8 (diff)
thp: cleanup: introduce mk_huge_pmd()
Introduce mk_huge_pmd() to simplify the code.

Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Ni zhan Chen <nizhan.chen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
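To illustrate the cleanup (the snippet below is lifted directly from the diff that follows, not new code): each caller that used to open-code the huge-pmd construction

	entry = mk_pmd(page, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
	entry = pmd_mkhuge(entry);

now collapses into a single call to the new helper:

	entry = mk_huge_pmd(page, vma);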
 mm/huge_memory.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 26002683a16c..ea5fb93a53a9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -606,6 +606,15 @@ static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 	return pmd;
 }
 
+static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+{
+	pmd_t entry;
+	entry = mk_pmd(page, vma->vm_page_prot);
+	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+	entry = pmd_mkhuge(entry);
+	return entry;
+}
+
 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 					struct vm_area_struct *vma,
 					unsigned long haddr, pmd_t *pmd,
@@ -629,9 +638,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		pte_free(mm, pgtable);
 	} else {
 		pmd_t entry;
-		entry = mk_pmd(page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		entry = pmd_mkhuge(entry);
+		entry = mk_huge_pmd(page, vma);
 		/*
 		 * The spinlocking to take the lru_lock inside
 		 * page_add_new_anon_rmap() acts as a full memory
@@ -951,9 +958,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	} else {
 		pmd_t entry;
 		VM_BUG_ON(!PageHead(page));
-		entry = mk_pmd(new_page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		entry = pmd_mkhuge(entry);
+		entry = mk_huge_pmd(new_page, vma);
 		pmdp_clear_flush(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr);
 		set_pmd_at(mm, haddr, pmd, entry);
@@ -2000,9 +2005,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	__SetPageUptodate(new_page);
 	pgtable = pmd_pgtable(_pmd);
 
-	_pmd = mk_pmd(new_page, vma->vm_page_prot);
-	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-	_pmd = pmd_mkhuge(_pmd);
+	_pmd = mk_huge_pmd(new_page, vma);
 
 	/*
 	 * spin_lock() below is not the equivalent of smp_wmb(), so