author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-01-21 19:40:25 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-21 20:20:51 -0500
commit	b6ec57f4b92e9bae4617f7d98a054d45370284bb
tree	31b894dc2fa7c8a885baa4be7094c73002999719
parent	404a47410c26a115123885977053e9a1a4460929
thp: change pmd_trans_huge_lock() interface to return ptl
After THP refcounting rework we have only two possible return values from
pmd_trans_huge_lock(): success and failure.  Return-by-pointer for ptl
doesn't make much sense in this case.  Let's convert pmd_trans_huge_lock()
to return ptl on success and NULL on failure.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Minchan Kim <minchan@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	fs/proc/task_mmu.c	12
-rw-r--r--	include/linux/huge_mm.h	16
-rw-r--r--	mm/huge_memory.c	24
-rw-r--r--	mm/memcontrol.c	6
-rw-r--r--	mm/mincore.c	3
5 files changed, 36 insertions, 25 deletions
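
For orientation before the per-file diffs, here is a minimal userspace sketch of the old and new calling conventions. The stub types (spinlock_t, pmd_t, vm_area_struct), the stub spin_lock()/spin_unlock() helpers, and the *_old/*_new function names are invented for illustration only; just the shape of the interface mirrors this patch.

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct { int locked; } spinlock_t;	/* stub, not the kernel type */
typedef struct { int huge; } pmd_t;		/* stub */
struct vm_area_struct { spinlock_t ptl; };	/* stub */

static void spin_lock(spinlock_t *l)   { l->locked = 1; }
static void spin_unlock(spinlock_t *l) { l->locked = 0; }

/* Old convention: bool result, lock handed back by reference. */
static bool pmd_trans_huge_lock_old(pmd_t *pmd, struct vm_area_struct *vma,
				    spinlock_t **ptl)
{
	*ptl = &vma->ptl;
	spin_lock(*ptl);
	if (pmd->huge)
		return true;
	spin_unlock(*ptl);
	return false;
}

/* New convention: return the held lock on success, NULL on failure. */
static spinlock_t *pmd_trans_huge_lock_new(pmd_t *pmd,
					   struct vm_area_struct *vma)
{
	spinlock_t *ptl = &vma->ptl;

	spin_lock(ptl);
	if (pmd->huge)
		return ptl;
	spin_unlock(ptl);
	return NULL;
}

int main(void)
{
	struct vm_area_struct vma = { { 0 } };
	pmd_t pmd = { .huge = 1 };
	spinlock_t *ptl;

	/* Old caller pattern. */
	if (pmd_trans_huge_lock_old(&pmd, &vma, &ptl)) {
		printf("old: locked\n");
		spin_unlock(ptl);
	}

	/* New caller pattern: assign, then test the pointer. */
	ptl = pmd_trans_huge_lock_new(&pmd, &vma);
	if (ptl) {
		printf("new: locked\n");
		spin_unlock(ptl);
	}
	return 0;
}
```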
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 71ffc91060f6..85d16c67c33e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -602,7 +602,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		smaps_pmd_entry(pmd, addr, walk);
 		spin_unlock(ptl);
 		return 0;
@@ -913,7 +914,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 			clear_soft_dirty_pmd(vma, addr, pmd);
 			goto out;
@@ -1187,7 +1189,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	int err = 0;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge_lock(pmdp, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmdp, vma);
+	if (ptl) {
 		u64 flags = 0, frame = 0;
 		pmd_t pmd = *pmdp;
 
@@ -1519,7 +1522,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 	pte_t *orig_pte;
 	pte_t *pte;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
 
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cfe81e10bd54..459fd25b378e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -120,15 +120,15 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
 				 unsigned long start,
 				 unsigned long end,
 				 long adjust_next);
-extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl);
+extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma);
 /* mmap_sem must be held on entry */
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma)
 {
 	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
-		return __pmd_trans_huge_lock(pmd, vma, ptl);
+		return __pmd_trans_huge_lock(pmd, vma);
 	else
 		return false;
 }
@@ -190,10 +190,10 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 long adjust_next)
 {
 }
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma)
 {
-	return false;
+	return NULL;
 }
 
 static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8ad580273521..2d1ffe9d0e26 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1560,7 +1560,8 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	struct mm_struct *mm = tlb->mm;
 	int ret = 0;
 
-	if (!pmd_trans_huge_lock(pmd, vma, &ptl))
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (!ptl)
 		goto out_unlocked;
 
 	orig_pmd = *pmd;
@@ -1627,7 +1628,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	pmd_t orig_pmd;
 	spinlock_t *ptl;
 
-	if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
+	ptl = __pmd_trans_huge_lock(pmd, vma);
+	if (!ptl)
 		return 0;
 	/*
 	 * For architectures like ppc64 we look at deposited pgtable
@@ -1690,7 +1692,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 	 * We don't have to worry about the ordering of src and dst
 	 * ptlocks because exclusive mmap_sem prevents deadlock.
 	 */
-	if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
+	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
+	if (old_ptl) {
 		new_ptl = pmd_lockptr(mm, new_pmd);
 		if (new_ptl != old_ptl)
 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -1724,7 +1727,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	int ret = 0;
 
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = __pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		pmd_t entry;
 		bool preserve_write = prot_numa && pmd_write(*pmd);
 		ret = 1;
@@ -1760,14 +1764,14 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
  * Note that if it returns true, this routine returns without unlocking page
  * table lock. So callers must unlock it.
  */
-bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
-	*ptl = pmd_lock(vma->vm_mm, pmd);
+	spinlock_t *ptl;
+	ptl = pmd_lock(vma->vm_mm, pmd);
 	if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
-		return true;
-	spin_unlock(*ptl);
-	return false;
+		return ptl;
+	spin_unlock(ptl);
+	return NULL;
 }
 
 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ca052f2a4a0b..d06cae2de783 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4638,7 +4638,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
 			mc.precharge += HPAGE_PMD_NR;
 		spin_unlock(ptl);
@@ -4826,7 +4827,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	union mc_target target;
 	struct page *page;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (mc.precharge < HPAGE_PMD_NR) {
 			spin_unlock(ptl);
 			return 0;
diff --git a/mm/mincore.c b/mm/mincore.c
index 2a565ed8bb49..563f32045490 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -117,7 +117,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	unsigned char *vec = walk->private;
 	int nr = (end - addr) >> PAGE_SHIFT;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		memset(vec, 1, nr);
 		spin_unlock(ptl);
 		goto out;