author     Mel Gorman <mgorman@suse.de>                     2014-04-07 18:36:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-04-07 19:35:50 -0400
commit     1ad9f620c3a22fa800489455ce517c29e576934e
tree       1eb1db0166198b988983e6b59755ced828ad47fe /mm/mprotect.c
parent     88a9ab6e3dfb5b10168130c255c6102c925343ab
mm: numa: recheck for transhuge pages under lock during protection changes
Sasha reported the following bug using trinity:
kernel BUG at mm/mprotect.c:149!
invalid opcode: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
Dumping ftrace buffer:
(ftrace buffer empty)
Modules linked in:
CPU: 20 PID: 26219 Comm: trinity-c216 Tainted: G W 3.14.0-rc5-next-20140305-sasha-00011-ge06f5f3-dirty #105
task: ffff8800b6c80000 ti: ffff880228436000 task.ti: ffff880228436000
RIP: change_protection_range+0x3b3/0x500
Call Trace:
change_protection+0x25/0x30
change_prot_numa+0x1b/0x30
task_numa_work+0x279/0x360
task_work_run+0xae/0xf0
do_notify_resume+0x8e/0xe0
retint_signal+0x4d/0x92
The VM_BUG_ON was added in -mm by the patch "mm,numa: reorganize
change_pmd_range". The race existed without the patch but was just
harder to hit.
The problem is that the transhuge check is made without holding the PTL.
At the time of the check, a parallel fault can clear the pmd and insert
a new transhuge pmd, which then triggers the VM_BUG_ON. This patch
removes the VM_BUG_ON and fixes the race by rechecking for a transhuge
pmd under the PTL when marking page tables for NUMA hinting, bailing
out if a race occurred. This is not a problem for calls to mprotect(),
as they hold mmap_sem for write.
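
The shape of the fix is the classic recheck-under-lock idiom: a check
made without the lock is only a hint and must be revalidated once the
lock is held. A minimal userspace sketch of the idiom (hypothetical
names, with a pthread mutex standing in for the kernel's pmd lock; the
actual fix is in the diff below):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool is_huge;    /* stands in for pmd_trans_huge(*pmd) */

    /*
     * Returns true if the update was done, false if we raced with a
     * "huge" insertion, mirroring the way lock_pte_protection() below
     * returns NULL when it loses the race.
     */
    static bool update_unless_huge(void)
    {
        if (is_huge)                /* unlocked check: only a hint */
            return false;

        pthread_mutex_lock(&lock);
        if (is_huge) {              /* recheck now that the lock is held */
            pthread_mutex_unlock(&lock);
            return false;           /* lost the race: bail out */
        }
        /* state cannot change while the lock is held: do the update */
        pthread_mutex_unlock(&lock);
        return true;
    }

Note that the fix also checks pmd_none() under the lock: a parallel
fault may have cleared the pmd without yet installing its replacement,
and that window has to be treated as a lost race as well.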
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--  mm/mprotect.c | 36 ++++++++++++++++++++++++++++++++++--
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 79cb51866e02..2c51c79c8a69 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -36,6 +36,34 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 }
 #endif
 
+/*
+ * For a prot_numa update we only hold mmap_sem for read so there is a
+ * potential race with faulting where a pmd was temporarily none. This
+ * function checks for a transhuge pmd under the appropriate lock. It
+ * returns a pte if it was successfully locked or NULL if it raced with
+ * a transhuge insertion.
+ */
+static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, int prot_numa, spinlock_t **ptl)
+{
+	pte_t *pte;
+	spinlock_t *pmdl;
+
+	/* !prot_numa is protected by mmap_sem held for write */
+	if (!prot_numa)
+		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
+
+	pmdl = pmd_lock(vma->vm_mm, pmd);
+	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
+		spin_unlock(pmdl);
+		return NULL;
+	}
+
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
+	spin_unlock(pmdl);
+	return pte;
+}
+
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable, int prot_numa)
@@ -45,7 +73,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	unsigned long pages = 0;
 
-	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
+	if (!pte)
+		return 0;
+
 	arch_enter_lazy_mmu_mode();
 	do {
 		oldpte = *pte;
@@ -132,12 +163,13 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 					pages += HPAGE_PMD_NR;
 					nr_huge_updates++;
 				}
+
+				/* huge pmd was handled */
 				continue;
 			}
 		}
 		/* fall through, the trans huge pmd just split */
 	}
-	VM_BUG_ON(pmd_trans_huge(*pmd));
 	this_pages = change_pte_range(vma, pmd, addr, next, newprot,
 				 dirty_accountable, prot_numa);
 	pages += this_pages;