| author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2017-04-13 17:56:17 -0400 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-04-13 21:24:20 -0400 |
| commit | 0a85e51d37645e9ce57e5e1a30859e07810ed07c (patch) | |
| tree | b56010bad2b9f938904f3fc10b9b7e9bbe21e81e /mm/huge_memory.c | |
| parent | 76e32a2a084ed71b48179023cd8fdb3787c8a6ad (diff) | |
thp: reduce indentation level in change_huge_pmd()
Patch series "thp: fix few MADV_DONTNEED races"
For MADV_DONTNEED to work properly with huge pages, it is critical not to
clear the pmd, even transiently, unless you hold down_write(mmap_sem).
Otherwise MADV_DONTNEED can miss the THP, which can lead to userspace
breakage.
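To make the userspace impact concrete, here is a minimal, hypothetical userspace sketch (not part of this series) of the contract MADV_DONTNEED relies on for private anonymous memory: once the call returns, reads of the range must observe zero-filled pages. A THP that madvise() misses because of such a race would instead leave the old contents visible.

```c
/*
 * Hypothetical sketch, not from this patch series: after MADV_DONTNEED,
 * private anonymous memory must read back as zeroes.  A racing THP that
 * madvise() skips would leave the old contents (0xab here) in place.
 */
#include <assert.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)	/* one PMD-sized (2MB) range */

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(p != MAP_FAILED);

	madvise(p, LEN, MADV_HUGEPAGE);	/* hint only; THP backing is not guaranteed */
	memset(p, 0xab, LEN);		/* populate the range */

	assert(madvise(p, LEN, MADV_DONTNEED) == 0);
	assert(p[0] == 0 && p[LEN - 1] == 0);	/* must read back as zeroes */
	return 0;
}
```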
See an example of such a race in the commit message of patch 2/4.
All these races were found by code inspection; I haven't seen any of them
triggered. I don't think the fixes are worth applying to stable@.
This patch (of 4):
Restructure code in preparation for a fix.
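The restructuring only flattens the control flow: the body of change_huge_pmd() moves out of the big `if (ptl) { ... }` block into an early `return 0` on lock failure plus a shared `unlock:` exit label, as the diff below shows. The following self-contained sketch applies the same transformation to a hypothetical helper (the names and the pthread lock are illustrative, not kernel code):

```c
/*
 * Hypothetical helpers showing the restructuring pattern used in
 * change_huge_pmd(): fail fast when the lock isn't taken, and funnel
 * the remaining exits through one unlock label.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* Before: all of the work nests inside "if (got the lock)". */
static int bump_nested(bool enabled)
{
	int ret = 0;

	if (pthread_mutex_trylock(&lock) == 0) {
		if (enabled) {
			counter++;
			ret = 1;
		}
		pthread_mutex_unlock(&lock);
	}
	return ret;
}

/* After: identical behaviour, one level of nesting less. */
static int bump_flat(bool enabled)
{
	int ret = 0;

	if (pthread_mutex_trylock(&lock) != 0)
		return 0;

	if (!enabled)
		goto unlock;

	counter++;
	ret = 1;
unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	/* Both variants do the same thing; exercise them once each. */
	return (bump_nested(true) + bump_flat(true) == 2) ? 0 : 1;
}
```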
Link: http://lkml.kernel.org/r/20170302151034.27829-2-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r-- | mm/huge_memory.c | 52 |
1 file changed, 26 insertions, 26 deletions
```diff
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fef4cf210cc7..a513861a9037 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1724,37 +1724,37 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	spinlock_t *ptl;
-	int ret = 0;
+	pmd_t entry;
+	bool preserve_write;
+	int ret;
 
 	ptl = __pmd_trans_huge_lock(pmd, vma);
-	if (ptl) {
-		pmd_t entry;
-		bool preserve_write = prot_numa && pmd_write(*pmd);
-		ret = 1;
+	if (!ptl)
+		return 0;
 
-		/*
-		 * Avoid trapping faults against the zero page. The read-only
-		 * data is likely to be read-cached on the local CPU and
-		 * local/remote hits to the zero page are not interesting.
-		 */
-		if (prot_numa && is_huge_zero_pmd(*pmd)) {
-			spin_unlock(ptl);
-			return ret;
-		}
+	preserve_write = prot_numa && pmd_write(*pmd);
+	ret = 1;
 
-		if (!prot_numa || !pmd_protnone(*pmd)) {
-			entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
-			entry = pmd_modify(entry, newprot);
-			if (preserve_write)
-				entry = pmd_mk_savedwrite(entry);
-			ret = HPAGE_PMD_NR;
-			set_pmd_at(mm, addr, pmd, entry);
-			BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
-					pmd_write(entry));
-		}
-		spin_unlock(ptl);
-	}
+	/*
+	 * Avoid trapping faults against the zero page. The read-only
+	 * data is likely to be read-cached on the local CPU and
+	 * local/remote hits to the zero page are not interesting.
+	 */
+	if (prot_numa && is_huge_zero_pmd(*pmd))
+		goto unlock;
 
+	if (prot_numa && pmd_protnone(*pmd))
+		goto unlock;
+
+	entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
+	entry = pmd_modify(entry, newprot);
+	if (preserve_write)
+		entry = pmd_mk_savedwrite(entry);
+	ret = HPAGE_PMD_NR;
+	set_pmd_at(mm, addr, pmd, entry);
+	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
+unlock:
+	spin_unlock(ptl);
 	return ret;
 }
 
```