aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-08-01 16:43:38 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-08-01 16:43:38 -0400
commit8b11ec1b5ffb54f71cb5a5e5c8c4d36e5d113085 (patch)
tree906ec3b3f32af427c35f45b049cdc5ca3db1034f
parent53406ed1bcfdabe4b5bc35e6d17946c6f9f563e2 (diff)
mm: do not initialize TLB stack vma's with vma_init()
Commit 2c4541e24c55 ("mm: use vma_init() to initialize VMAs on stack and data segments") tried to initialize various left-over ad-hoc vma's "properly", but actually made things worse for the temporary vma's used for TLB flushing.

vma_init() doesn't actually initialize all of the vma, just a few fields, so doing something like

   -	struct vm_area_struct vma = { .vm_mm = tlb->mm, };
   +	struct vm_area_struct vma;
   +
   +	vma_init(&vma, tlb->mm);

was actually very bad: instead of having a nicely initialized vma with every field but "vm_mm" zeroed, you'd have an entirely uninitialized vma with only a couple of fields initialized. And they weren't even fields that the code in question mostly cared about.

The flush_tlb_range() function takes a "struct vma" rather than a "struct mm_struct", because a few architectures actually care about what kind of range it is - being able to only do an ITLB flush if it's a range that doesn't have data accesses enabled, for example. And all the normal users already have the vma for doing the range invalidation.

But a few people want to call flush_tlb_range() with a range they just made up, so they also end up using a made-up vma. x86 just has a special "flush_tlb_mm_range()" function for this, but other architectures (arm and ia64) do the "use fake vma" thing instead, and thus got caught up in the vma_init() changes.

At the same time, the TLB flushing code really doesn't care about most other fields in the vma, so vma_init() is just unnecessary and pointless.

This fixes things by having an explicit "this is just an initializer for the TLB flush" initializer macro, which is used by the arm/arm64/ia64 people who mis-use this interface with just a dummy vma.
Fixes: 2c4541e24c55 ("mm: use vma_init() to initialize VMAs on stack and data segments")
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--arch/arm/mach-rpc/ecard.c5
-rw-r--r--arch/arm64/include/asm/tlb.h4
-rw-r--r--arch/arm64/mm/hugetlbpage.c10
-rw-r--r--arch/ia64/include/asm/tlb.h7
-rw-r--r--include/linux/mm.h3
5 files changed, 12 insertions, 17 deletions
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index 8db62cc54a6a..04b2f22c2739 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -212,7 +212,7 @@ static DEFINE_MUTEX(ecard_mutex);
212 */ 212 */
213static void ecard_init_pgtables(struct mm_struct *mm) 213static void ecard_init_pgtables(struct mm_struct *mm)
214{ 214{
215 struct vm_area_struct vma; 215 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);
216 216
217 /* We want to set up the page tables for the following mapping: 217 /* We want to set up the page tables for the following mapping:
218 * Virtual Physical 218 * Virtual Physical
@@ -237,9 +237,6 @@ static void ecard_init_pgtables(struct mm_struct *mm)
237 237
238 memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE)); 238 memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
239 239
240 vma_init(&vma, mm);
241 vma.vm_flags = VM_EXEC;
242
243 flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE); 240 flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
244 flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE); 241 flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
245} 242}
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index d87f2d646caa..0ad1cf233470 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -37,9 +37,7 @@ static inline void __tlb_remove_table(void *_table)
37 37
38static inline void tlb_flush(struct mmu_gather *tlb) 38static inline void tlb_flush(struct mmu_gather *tlb)
39{ 39{
40 struct vm_area_struct vma; 40 struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
41
42 vma_init(&vma, tlb->mm);
43 41
44 /* 42 /*
45 * The ASID allocator will either invalidate the ASID or mark 43 * The ASID allocator will either invalidate the ASID or mark
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 1854e49aa18a..192b3ba07075 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -108,13 +108,10 @@ static pte_t get_clear_flush(struct mm_struct *mm,
108 unsigned long pgsize, 108 unsigned long pgsize,
109 unsigned long ncontig) 109 unsigned long ncontig)
110{ 110{
111 struct vm_area_struct vma;
112 pte_t orig_pte = huge_ptep_get(ptep); 111 pte_t orig_pte = huge_ptep_get(ptep);
113 bool valid = pte_valid(orig_pte); 112 bool valid = pte_valid(orig_pte);
114 unsigned long i, saddr = addr; 113 unsigned long i, saddr = addr;
115 114
116 vma_init(&vma, mm);
117
118 for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { 115 for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
119 pte_t pte = ptep_get_and_clear(mm, addr, ptep); 116 pte_t pte = ptep_get_and_clear(mm, addr, ptep);
120 117
@@ -127,8 +124,10 @@ static pte_t get_clear_flush(struct mm_struct *mm,
127 orig_pte = pte_mkdirty(orig_pte); 124 orig_pte = pte_mkdirty(orig_pte);
128 } 125 }
129 126
130 if (valid) 127 if (valid) {
128 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
131 flush_tlb_range(&vma, saddr, addr); 129 flush_tlb_range(&vma, saddr, addr);
130 }
132 return orig_pte; 131 return orig_pte;
133} 132}
134 133
@@ -147,10 +146,9 @@ static void clear_flush(struct mm_struct *mm,
147 unsigned long pgsize, 146 unsigned long pgsize,
148 unsigned long ncontig) 147 unsigned long ncontig)
149{ 148{
150 struct vm_area_struct vma; 149 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
151 unsigned long i, saddr = addr; 150 unsigned long i, saddr = addr;
152 151
153 vma_init(&vma, mm);
154 for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) 152 for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
155 pte_clear(mm, addr, ptep); 153 pte_clear(mm, addr, ptep);
156 154
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index db89e7306081..516355a774bf 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -115,12 +115,11 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
115 flush_tlb_all(); 115 flush_tlb_all();
116 } else { 116 } else {
117 /* 117 /*
118 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a 118 * flush_tlb_range() takes a vma instead of a mm pointer because
119 * vma pointer. 119 * some architectures want the vm_flags for ITLB/DTLB flush.
120 */ 120 */
121 struct vm_area_struct vma; 121 struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
122 122
123 vma_init(&vma, tlb->mm);
124 /* flush the address range from the tlb: */ 123 /* flush the address range from the tlb: */
125 flush_tlb_range(&vma, start, end); 124 flush_tlb_range(&vma, start, end);
126 /* now flush the virt. page-table area mapping the address range: */ 125 /* now flush the virt. page-table area mapping the address range: */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7ba6d356d18f..68a5121694ef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -466,6 +466,9 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
466 vma->vm_ops = NULL; 466 vma->vm_ops = NULL;
467} 467}
468 468
469/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
470#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
471
469struct mmu_gather; 472struct mmu_gather;
470struct inode; 473struct inode;
471 474