diff options
author:    Hugh Dickins <hughd@google.com>  2014-06-23 16:22:05 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-06-23 19:47:44 -0400
commit:    5338a9372234f8b782c7d78f0355e1cb21d02468 (patch)
tree:      038ed1c19e7434f0ec4c6556b7367798348882ec /mm
parent:    ed235875e2ca983197831337a986f0517074e1a0 (diff)
mm: thp: fix DEBUG_PAGEALLOC oops in copy_page_rep()
Trinity has for over a year been reporting a CONFIG_DEBUG_PAGEALLOC oops
in copy_page_rep() called from copy_user_huge_page() called from
do_huge_pmd_wp_page().
I believe this is a DEBUG_PAGEALLOC false positive, due to the source
page being split, and a tail page freed, while copy is in progress; and
not a problem without DEBUG_PAGEALLOC, since the pmd_same() check will
prevent a miscopy from being made visible.
Fix by adding get_user_huge_page() and put_user_huge_page(): reducing to
the usual get_page() and put_page() on head page in the usual config;
but get and put references to all of the tail pages when
DEBUG_PAGEALLOC.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c | 39 +++++++++++++++++++++++++++++++++++----
 1 file changed, 35 insertions(+), 4 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e60837dc785c..bade35ef563b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -941,6 +941,37 @@ unlock:
 	spin_unlock(ptl);
 }
 
+/*
+ * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
+ * during copy_user_huge_page()'s copy_page_rep(): in the case when
+ * the source page gets split and a tail freed before copy completes.
+ * Called under pmd_lock of checked pmd, so safe from splitting itself.
+ */
+static void get_user_huge_page(struct page *page)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
+		struct page *endpage = page + HPAGE_PMD_NR;
+
+		atomic_add(HPAGE_PMD_NR, &page->_count);
+		while (++page < endpage)
+			get_huge_page_tail(page);
+	} else {
+		get_page(page);
+	}
+}
+
+static void put_user_huge_page(struct page *page)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
+		struct page *endpage = page + HPAGE_PMD_NR;
+
+		while (page < endpage)
+			put_page(page++);
+	} else {
+		put_page(page);
+	}
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 					struct vm_area_struct *vma,
 					unsigned long address,
@@ -1074,7 +1105,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		ret |= VM_FAULT_WRITE;
 		goto out_unlock;
 	}
-	get_page(page);
+	get_user_huge_page(page);
 	spin_unlock(ptl);
 alloc:
 	if (transparent_hugepage_enabled(vma) &&
@@ -1095,7 +1126,7 @@ alloc:
 			split_huge_page(page);
 			ret |= VM_FAULT_FALLBACK;
 		}
-		put_page(page);
+		put_user_huge_page(page);
 	}
 	count_vm_event(THP_FAULT_FALLBACK);
 	goto out;
@@ -1105,7 +1136,7 @@ alloc:
 		put_page(new_page);
 		if (page) {
 			split_huge_page(page);
-			put_page(page);
+			put_user_huge_page(page);
 		} else
 			split_huge_page_pmd(vma, address, pmd);
 		ret |= VM_FAULT_FALLBACK;
@@ -1127,7 +1158,7 @@ alloc:
 
 	spin_lock(ptl);
 	if (page)
-		put_page(page);
+		put_user_huge_page(page);
 	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
 		spin_unlock(ptl);
 		mem_cgroup_uncharge_page(new_page);