author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>   2013-11-14 17:30:56 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>         2013-11-14 19:32:14 -0500
commit     117b0791ac42f2ec447bc864e70ad622b5604059 (patch)
tree       3381f98791bcafd31ea4ae9d0fa566815112020d /mm/huge_memory.c
parent     bf929152e9f6c49b66fad4ebf08cc95b02ce48f5 (diff)
mm, thp: move ptl taking inside page_check_address_pmd()
With split page table lock we can't know which lock we need to take
before we find the relevant pmd, so let's move the lock taking inside
the function (a caller-side sketch follows the tags below).
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Alex Thorlton <athorlton@sgi.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "Eric W . Biederman" <ebiederm@xmission.com>
Cc: "Paul E . McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
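
A minimal sketch of the resulting calling convention, distilled from the
callers updated in the diff below. Only page_check_address_pmd(), the
PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG flag and spin_unlock() come from
this patch; the wrapper function, its name and its return convention are
illustrative assumptions:

/*
 * Sketch only (not part of the commit): a caller after this patch.
 * The pmd lock is taken inside page_check_address_pmd() and handed back
 * through @ptl; on success the caller runs under that lock and must drop
 * it with spin_unlock().  Assumes the declarations from linux/huge_mm.h
 * are in scope, as they are for the callers in mm/huge_memory.c.
 */
#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/spinlock.h>

static int example_walk_huge_pmd(struct page *page, struct mm_struct *mm,
                                 unsigned long address)
{
        spinlock_t *ptl;        /* filled in only when a pmd is returned */
        pmd_t *pmd;
        int ret = 0;

        pmd = page_check_address_pmd(page, mm, address,
                        PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
        if (pmd) {
                /* *pmd maps @page here and *ptl is held */
                ret = 1;
                spin_unlock(ptl);       /* caller drops the page table lock */
        }
        /* on failure, NULL was returned with no lock held */
        return ret;
}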
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  43
1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 471eb04066ff..c2082ab4fc93 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1552,23 +1552,33 @@ int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
         return 0;
 }
 
+/*
+ * This function returns whether a given @page is mapped onto the @address
+ * in the virtual space of @mm.
+ *
+ * When it's true, this function returns *pmd with holding the page table lock
+ * and passing it back to the caller via @ptl.
+ * If it's false, returns NULL without holding the page table lock.
+ */
 pmd_t *page_check_address_pmd(struct page *page,
                               struct mm_struct *mm,
                               unsigned long address,
-                              enum page_check_address_pmd_flag flag)
+                              enum page_check_address_pmd_flag flag,
+                              spinlock_t **ptl)
 {
-        pmd_t *pmd, *ret = NULL;
+        pmd_t *pmd;
 
         if (address & ~HPAGE_PMD_MASK)
-                goto out;
+                return NULL;
 
         pmd = mm_find_pmd(mm, address);
         if (!pmd)
-                goto out;
+                return NULL;
+        *ptl = pmd_lock(mm, pmd);
         if (pmd_none(*pmd))
-                goto out;
+                goto unlock;
         if (pmd_page(*pmd) != page)
-                goto out;
+                goto unlock;
         /*
          * split_vma() may create temporary aliased mappings. There is
          * no risk as long as all huge pmd are found and have their
@@ -1578,14 +1588,15 @@ pmd_t *page_check_address_pmd(struct page *page,
          */
         if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
             pmd_trans_splitting(*pmd))
-                goto out;
+                goto unlock;
         if (pmd_trans_huge(*pmd)) {
                 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
                           !pmd_trans_splitting(*pmd));
-                ret = pmd;
+                return pmd;
         }
-out:
-        return ret;
+unlock:
+        spin_unlock(*ptl);
+        return NULL;
 }
 
 static int __split_huge_page_splitting(struct page *page,
@@ -1593,6 +1604,7 @@ static int __split_huge_page_splitting(struct page *page,
                                        unsigned long address)
 {
         struct mm_struct *mm = vma->vm_mm;
+        spinlock_t *ptl;
         pmd_t *pmd;
         int ret = 0;
         /* For mmu_notifiers */
@@ -1600,9 +1612,8 @@ static int __split_huge_page_splitting(struct page *page,
         const unsigned long mmun_end = address + HPAGE_PMD_SIZE;
 
         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-        spin_lock(&mm->page_table_lock);
         pmd = page_check_address_pmd(page, mm, address,
-                        PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
+                        PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
         if (pmd) {
                 /*
                  * We can't temporarily set the pmd to null in order
@@ -1613,8 +1624,8 @@ static int __split_huge_page_splitting(struct page *page,
                  */
                 pmdp_splitting_flush(vma, address, pmd);
                 ret = 1;
+                spin_unlock(ptl);
         }
-        spin_unlock(&mm->page_table_lock);
         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 
         return ret;
@@ -1745,14 +1756,14 @@ static int __split_huge_page_map(struct page *page,
                                  unsigned long address)
 {
         struct mm_struct *mm = vma->vm_mm;
+        spinlock_t *ptl;
         pmd_t *pmd, _pmd;
         int ret = 0, i;
         pgtable_t pgtable;
         unsigned long haddr;
 
-        spin_lock(&mm->page_table_lock);
         pmd = page_check_address_pmd(page, mm, address,
-                        PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
+                        PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
         if (pmd) {
                 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
                 pmd_populate(mm, &_pmd, pgtable);
@@ -1807,8 +1818,8 @@ static int __split_huge_page_map(struct page *page,
                 pmdp_invalidate(vma, address, pmd);
                 pmd_populate(mm, pmd, pgtable);
                 ret = 1;
+                spin_unlock(ptl);
         }
-        spin_unlock(&mm->page_table_lock);
 
         return ret;
 }