author		Mike Kravetz <mike.kravetz@oracle.com>		2018-12-28 03:39:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 15:11:51 -0500
commit		b43a9990055958e70347c56f90ea2ae32c67334c
tree		91f90f0c3e73ca076cbc4a9780bd7d5a271b6257 /mm/memory-failure.c
parent		1ecc07fd0a6d350bbf4dc176e0d654661b304a30
hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
While looking at BUGs associated with invalid huge page map counts, it was
discovered that a huge pte pointer could become 'invalid' and point to
another task's page table. Consider the following:
A task takes a page fault on a shared hugetlbfs file and calls
huge_pte_alloc to get a ptep. Suppose the returned ptep points to a
shared pmd.
Now, another task truncates the hugetlbfs file. As part of truncation, it
unmaps everyone who has the file mapped. If the range being truncated is
covered by a shared pmd, huge_pmd_unshare will be called. For all but the
last user of the shared pmd, huge_pmd_unshare will clear the pud pointing
to the pmd. If the task in the middle of the page fault is not the last
user, the ptep returned by huge_pte_alloc now points to another task's
page table or worse. This leads to bad things such as incorrect page
map/reference counts or invalid memory references.
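The interleaving can be pictured as follows. This is an illustrative
timeline, not literal kernel code; huge_pte_alloc, huge_pmd_unshare and
set_huge_pte_at are the real functions involved, the rest is paraphrased:

/*
 * Illustrative interleaving only -- not literal kernel code.
 *
 * Task A: page fault on a shared hugetlbfs mapping
 *	ptep = huge_pte_alloc(mm, address, sz);
 *	// ptep points into a pmd page shared with other mappings
 *	// of the same file
 *
 * Task B: truncates the file and unmaps every mapper of the range
 *	huge_pmd_unshare(task_a_mm, &address, task_a_ptep);
 *	// A is not the last sharer, so B clears A's pud entry and
 *	// drops A's reference on the shared pmd page
 *
 * Task A: resumes the fault
 *	set_huge_pte_at(mm, address, ptep, entry);
 *	// ptep now refers into a pmd page A no longer owns; the write
 *	// lands in another task's page table (or freed memory)
 */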
To fix, expand the use of i_mmap_rwsem as follows:
- i_mmap_rwsem is held in read mode whenever huge_pmd_share is called.
huge_pmd_share is only called via huge_pte_alloc, so callers of
huge_pte_alloc take i_mmap_rwsem before calling. In addition, callers
of huge_pte_alloc continue to hold the semaphore until finished with the
ptep.
- i_mmap_rwsem is held in write mode whenever huge_pmd_unshare is
called.
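A minimal sketch of the resulting caller discipline (simplified and assumed
here; the fault-path changes live in mm/hugetlb.c and its callers, outside
this filtered diffstat, and 'mapping' stands for vma->vm_file->f_mapping):

/* Fault side: i_mmap_rwsem held in read mode across huge_pte_alloc()
 * and for as long as the returned ptep is used.  Sketch only. */
mapping = vma->vm_file->f_mapping;
i_mmap_lock_read(mapping);
ptep = huge_pte_alloc(mm, address, huge_page_size(h));
if (ptep) {
	/* ... handle the fault, dereferencing ptep ... */
}
i_mmap_unlock_read(mapping);

/* Unmap/truncate side: i_mmap_rwsem held in write mode, so no fault
 * can be holding a ptep from a pmd that is about to be unshared. */
i_mmap_lock_write(mapping);
/* ... rmap walk that may call huge_pmd_unshare() ... */
i_mmap_unlock_write(mapping);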
[mike.kravetz@oracle.com: add explicit check for mapping != null]
Link: http://lkml.kernel.org/r/20181218223557.5202-2-mike.kravetz@oracle.com
Fixes: 39dde65c9940 ("shared page table for hugetlb page")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory-failure.c')
 mm/memory-failure.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7c72f2a95785..6379fff1a5ff 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -966,7 +966,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
-	bool unmap_success;
+	bool unmap_success = true;
 	int kill = 1, forcekill;
 	struct page *hpage = *hpagep;
 	bool mlocked = PageMlocked(hpage);
@@ -1028,7 +1028,19 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (kill)
 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
-	unmap_success = try_to_unmap(hpage, ttu);
+	if (!PageHuge(hpage)) {
+		unmap_success = try_to_unmap(hpage, ttu);
+	} else if (mapping) {
+		/*
+		 * For hugetlb pages, try_to_unmap could potentially call
+		 * huge_pmd_unshare.  Because of this, take semaphore in
+		 * write mode here and set TTU_RMAP_LOCKED to indicate we
+		 * have taken the lock at this higher level.
+		 */
+		i_mmap_lock_write(mapping);
+		unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
+		i_mmap_unlock_write(mapping);
+	}
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
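For context, TTU_RMAP_LOCKED tells the rmap code that the caller already
holds i_mmap_rwsem, so the walk must not take it again; the tail of
try_to_unmap() in mm/rmap.c of this era does roughly (paraphrased):

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);	/* caller holds i_mmap_rwsem */
	else
		rmap_walk(page, &rwc);		/* walk acquires the lock itself */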