author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2018-07-26 19:37:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>        2018-07-26 22:38:03 -0400
commit     2c4541e24c55e2847bede93e33d749280edd429a (patch)
tree       c0d440dee0cd3129e8c7a0d182407c6804543c51
parent     027232da7c7c1c7f04383f93bd798e475dde5285 (diff)
mm: use vma_init() to initialize VMAs on stack and data segments
Make sure to initialize all VMAs properly, not only those which come
from vm_area_cachep.

Link: http://lkml.kernel.org/r/20180724121139.62570-3-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
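For context, every hunk below replaces an open-coded on-stack or static VMA setup with a call to the vma_init() helper introduced by the parent commit (027232da7c7c). A minimal sketch of what that helper is assumed to do is shown here; it is an illustration based on that companion change, not part of this diff, and the exact upstream definition may differ:

	/* Sketch (assumption): vma_init() ties the VMA to an mm and gives it a
	 * valid, empty vm_ops so stack/data-segment VMAs never carry garbage
	 * pointers left over from the stack or from a plain memset(). */
	static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
	{
		static const struct vm_operations_struct dummy_vm_ops = {};

		vma->vm_mm = mm;			/* mm may be NULL for gate/pseudo VMAs */
		vma->vm_ops = &dummy_vm_ops;		/* never leave vm_ops uninitialized */
		INIT_LIST_HEAD(&vma->anon_vma_chain);	/* start with an empty anon_vma chain */
	}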
-rw-r--r--  arch/arm/kernel/process.c    | 1 +
-rw-r--r--  arch/arm/mach-rpc/ecard.c    | 2 +-
-rw-r--r--  arch/arm64/include/asm/tlb.h | 4 +++-
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  | 7 +++++--
-rw-r--r--  arch/ia64/include/asm/tlb.h  | 2 +-
-rw-r--r--  arch/ia64/mm/init.c          | 2 +-
-rw-r--r--  arch/x86/um/mem_32.c         | 2 +-
-rw-r--r--  fs/hugetlbfs/inode.c         | 2 ++
-rw-r--r--  mm/mempolicy.c               | 1 +
-rw-r--r--  mm/shmem.c                   | 1 +
10 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 225d1c58d2de..d9c299133111 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma = {
 
 static int __init gate_vma_init(void)
 {
+	vma_init(&gate_vma, NULL);
 	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
 	return 0;
 }
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index 39aef4876ed4..8db62cc54a6a 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -237,8 +237,8 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 
 	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
+	vma_init(&vma, mm);
 	vma.vm_flags = VM_EXEC;
-	vma.vm_mm = mm;
 
 	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
 	flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index ffdaea7954bb..d87f2d646caa 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -37,7 +37,9 @@ static inline void __tlb_remove_table(void *_table)
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+	struct vm_area_struct vma;
+
+	vma_init(&vma, tlb->mm);
 
 	/*
 	 * The ASID allocator will either invalidate the ASID or mark
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index ecc6818191df..1854e49aa18a 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -108,11 +108,13 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 			      unsigned long pgsize,
 			      unsigned long ncontig)
 {
-	struct vm_area_struct vma = { .vm_mm = mm };
+	struct vm_area_struct vma;
 	pte_t orig_pte = huge_ptep_get(ptep);
 	bool valid = pte_valid(orig_pte);
 	unsigned long i, saddr = addr;
 
+	vma_init(&vma, mm);
+
 	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
 		pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 
@@ -145,9 +147,10 @@ static void clear_flush(struct mm_struct *mm,
 			 unsigned long pgsize,
 			 unsigned long ncontig)
 {
-	struct vm_area_struct vma = { .vm_mm = mm };
+	struct vm_area_struct vma;
 	unsigned long i, saddr = addr;
 
+	vma_init(&vma, mm);
 	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
 		pte_clear(mm, addr, ptep);
 
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 44f0ac0df308..db89e7306081 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -120,7 +120,7 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
 	 */
 	struct vm_area_struct vma;
 
-	vma.vm_mm = tlb->mm;
+	vma_init(&vma, tlb->mm);
 	/* flush the address range from the tlb: */
 	flush_tlb_range(&vma, start, end);
 	/* now flush the virt. page-table area mapping the address range: */
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index bdb14a369137..e6c6dfd98de2 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -273,7 +273,7 @@ static struct vm_area_struct gate_vma;
 
 static int __init gate_vma_init(void)
 {
-	gate_vma.vm_mm = NULL;
+	vma_init(&gate_vma, NULL);
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
index 744afdc18cf3..56c44d865f7b 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
 	if (!FIXADDR_USER_START)
 		return 0;
 
-	gate_vma.vm_mm = NULL;
+	vma_init(&gate_vma, NULL);
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d508c7844681..40d4c66c7751 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -411,6 +411,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	bool truncate_op = (lend == LLONG_MAX);
 
 	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+	vma_init(&pseudo_vma, current->mm);
 	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
 	pagevec_init(&pvec);
 	next = start;
@@ -595,6 +596,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 	 * as input to create an allocation policy.
 	 */
 	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+	vma_init(&pseudo_vma, mm);
 	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
 	pseudo_vma.vm_file = file;
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9ac49ef17b4e..01f1a14facc4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2505,6 +2505,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 
 	/* Create pseudo-vma that contains just the policy */
 	memset(&pvma, 0, sizeof(struct vm_area_struct));
+	vma_init(&pvma, NULL);
 	pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
 	mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 2cab84403055..41b9bbf24e16 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1421,6 +1421,7 @@ static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
 {
 	/* Create a pseudo vma that just contains the policy */
 	memset(vma, 0, sizeof(*vma));
+	vma_init(vma, NULL);
 	/* Bias interleave by inode number to distribute better across nodes */
 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);