-rw-r--r--	arch/arm/kernel/process.c		3
-rw-r--r--	arch/arm64/kernel/vdso.c		6
-rw-r--r--	arch/hexagon/kernel/vdso.c		3
-rw-r--r--	arch/mips/kernel/vdso.c			3
-rw-r--r--	arch/powerpc/kernel/vdso.c		3
-rw-r--r--	arch/s390/kernel/vdso.c			3
-rw-r--r--	arch/sh/kernel/vsyscall/vsyscall.c	4
-rw-r--r--	arch/x86/entry/vdso/vma.c		3
-rw-r--r--	arch/x86/um/vdso/vma.c			3
9 files changed, 21 insertions, 10 deletions
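
Every hunk below makes the same change: the write lock on mmap_sem is taken with down_write_killable() instead of down_write(), so a task hit by a fatal signal while waiting for the semaphore fails the setup with -EINTR rather than blocking unkillably. A minimal sketch of the resulting pattern follows; the function name example_setup_pages() and the elided mapping work are placeholders for illustration, not code from this patch.

static int example_setup_pages(void)
{
	struct mm_struct *mm = current->mm;

	/* Killable variant: a fatal signal interrupts the wait. */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/* ... install the vDSO / sigpage mapping under mmap_sem ... */

	up_write(&mm->mmap_sem);
	return 0;
}

In each file below the early return propagates up through arch_setup_additional_pages() and fails the exec, which is acceptable because the task returning -EINTR has already received a fatal signal.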
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index a647d6642f3e..4a803c5a1ff7 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -420,7 +420,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	npages = 1; /* for sigpage */
 	npages += vdso_total_pages;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	hint = sigpage_addr(mm, npages);
 	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 64fc030be0f2..9fefb005812a 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -95,7 +95,8 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 	};
 	void *ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	current->mm->context.vdso = (void *)addr;
 
 	/* Map vectors page at the high address. */
@@ -163,7 +164,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	/* Be sure to map the data page */
 	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		ret = ERR_PTR(vdso_base);
diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
index 0bf5a87e4d0a..3ea968415539 100644
--- a/arch/hexagon/kernel/vdso.c
+++ b/arch/hexagon/kernel/vdso.c
@@ -65,7 +65,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long vdso_base;
 	struct mm_struct *mm = current->mm;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	/* Try to get it loaded right near ld.so/glibc. */
 	vdso_base = STACK_TOP;
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 975e99759bab..54e1663ce639 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -104,7 +104,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	struct resource gic_res;
 	int ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	/*
 	 * Determine total area size. This includes the VDSO data itself, the
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index def1b8b5e6c1..6767605ea8da 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -195,7 +195,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * and end up putting it elsewhere.
 	 * Add enough to the size so that the result can be aligned.
 	 */
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      (vdso_pages << PAGE_SHIFT) +
 				      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 94495cac8be3..5904abf6b1ae 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -216,7 +216,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * it at vdso_base which is the "natural" base for it, but we might
 	 * fail and end up putting it elsewhere.
 	 */
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		rc = vdso_base;
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index ea2aa1393b87..cc0cc5b4ff18 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -64,7 +64,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long addr;
 	int ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
 	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
 		ret = addr;
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index b3cf81333a54..ab220ac9b3b9 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -163,7 +163,8 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 		addr = 0;
 	}
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	addr = get_unmapped_area(NULL, addr,
 				 image->size - image->sym_vvar_start, 0, 0);
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index 237c6831e095..6be22f991b59 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -61,7 +61,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (!vdso_enabled)
 		return 0;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
 		VM_READ|VM_EXEC|