diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 20:29:01 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 20:29:01 -0400 |
commit | 8e4ef6386703835f91898334b72e48649646ec00 (patch) | |
tree | d425944eaf774d48b676f32e523ecfddd4a49636 | |
parent | 6aebe7f9e8697531a11b007d1e8126ba1b6e0a53 (diff) | |
parent | 6e68b08728ce3365c713f8663c6b05a79e2bbca1 (diff) |
Merge branch 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 vdso updates from Ingo Molnar:
"The main changes in this cycle centered around adding support for
32-bit compatible C/R of the vDSO on 64-bit kernels, by Dmitry
Safonov"
* 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/vdso: Use CONFIG_X86_X32_ABI to enable vdso prctl
x86/vdso: Only define map_vdso_randomized() if CONFIG_X86_64
x86/vdso: Only define prctl_map_vdso() if CONFIG_CHECKPOINT_RESTORE
x86/signal: Add SA_{X32,IA32}_ABI sa_flags
x86/ptrace: Down with test_thread_flag(TIF_IA32)
x86/coredump: Use pr_reg size, rather than TIF_IA32 flag
x86/arch_prctl/vdso: Add ARCH_MAP_VDSO_*
x86/vdso: Replace calculate_addr in map_vdso() with addr
x86/vdso: Unmap vdso blob on vvar mapping failure
-rw-r--r-- | arch/x86/entry/vdso/vma.c | 175 | ||||
-rw-r--r-- | arch/x86/ia32/ia32_signal.c | 2 | ||||
-rw-r--r-- | arch/x86/include/asm/compat.h | 8 | ||||
-rw-r--r-- | arch/x86/include/asm/fpu/signal.h | 6 | ||||
-rw-r--r-- | arch/x86/include/asm/signal.h | 4 | ||||
-rw-r--r-- | arch/x86/include/asm/vdso.h | 2 | ||||
-rw-r--r-- | arch/x86/include/uapi/asm/prctl.h | 6 | ||||
-rw-r--r-- | arch/x86/kernel/process_64.c | 27 | ||||
-rw-r--r-- | arch/x86/kernel/ptrace.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/signal.c | 20 | ||||
-rw-r--r-- | arch/x86/kernel/signal_compat.c | 34 | ||||
-rw-r--r-- | fs/binfmt_elf.c | 23 | ||||
-rw-r--r-- | include/linux/mm.h | 2 | ||||
-rw-r--r-- | kernel/signal.c | 7 | ||||
-rw-r--r-- | mm/mmap.c | 8 |
15 files changed, 220 insertions, 106 deletions
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index f840766659a8..23c881caabd1 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c | |||
@@ -37,54 +37,6 @@ void __init init_vdso_image(const struct vdso_image *image) | |||
37 | 37 | ||
38 | struct linux_binprm; | 38 | struct linux_binprm; |
39 | 39 | ||
40 | /* | ||
41 | * Put the vdso above the (randomized) stack with another randomized | ||
42 | * offset. This way there is no hole in the middle of address space. | ||
43 | * To save memory make sure it is still in the same PTE as the stack | ||
44 | * top. This doesn't give that many random bits. | ||
45 | * | ||
46 | * Note that this algorithm is imperfect: the distribution of the vdso | ||
47 | * start address within a PMD is biased toward the end. | ||
48 | * | ||
49 | * Only used for the 64-bit and x32 vdsos. | ||
50 | */ | ||
51 | static unsigned long vdso_addr(unsigned long start, unsigned len) | ||
52 | { | ||
53 | #ifdef CONFIG_X86_32 | ||
54 | return 0; | ||
55 | #else | ||
56 | unsigned long addr, end; | ||
57 | unsigned offset; | ||
58 | |||
59 | /* | ||
60 | * Round up the start address. It can start out unaligned as a result | ||
61 | * of stack start randomization. | ||
62 | */ | ||
63 | start = PAGE_ALIGN(start); | ||
64 | |||
65 | /* Round the lowest possible end address up to a PMD boundary. */ | ||
66 | end = (start + len + PMD_SIZE - 1) & PMD_MASK; | ||
67 | if (end >= TASK_SIZE_MAX) | ||
68 | end = TASK_SIZE_MAX; | ||
69 | end -= len; | ||
70 | |||
71 | if (end > start) { | ||
72 | offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); | ||
73 | addr = start + (offset << PAGE_SHIFT); | ||
74 | } else { | ||
75 | addr = start; | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * Forcibly align the final address in case we have a hardware | ||
80 | * issue that requires alignment for performance reasons. | ||
81 | */ | ||
82 | addr = align_vdso_addr(addr); | ||
83 | |||
84 | return addr; | ||
85 | #endif | ||
86 | } | ||
87 | |||
88 | static int vdso_fault(const struct vm_special_mapping *sm, | 40 | static int vdso_fault(const struct vm_special_mapping *sm, |
89 | struct vm_area_struct *vma, struct vm_fault *vmf) | 41 | struct vm_area_struct *vma, struct vm_fault *vmf) |
90 | { | 42 | { |
@@ -176,30 +128,28 @@ static int vvar_fault(const struct vm_special_mapping *sm, | |||
176 | return VM_FAULT_SIGBUS; | 128 | return VM_FAULT_SIGBUS; |
177 | } | 129 | } |
178 | 130 | ||
179 | static int map_vdso(const struct vdso_image *image, bool calculate_addr) | 131 | static const struct vm_special_mapping vdso_mapping = { |
132 | .name = "[vdso]", | ||
133 | .fault = vdso_fault, | ||
134 | .mremap = vdso_mremap, | ||
135 | }; | ||
136 | static const struct vm_special_mapping vvar_mapping = { | ||
137 | .name = "[vvar]", | ||
138 | .fault = vvar_fault, | ||
139 | }; | ||
140 | |||
141 | /* | ||
142 | * Add vdso and vvar mappings to current process. | ||
143 | * @image - blob to map | ||
144 | * @addr - request a specific address (zero to map at free addr) | ||
145 | */ | ||
146 | static int map_vdso(const struct vdso_image *image, unsigned long addr) | ||
180 | { | 147 | { |
181 | struct mm_struct *mm = current->mm; | 148 | struct mm_struct *mm = current->mm; |
182 | struct vm_area_struct *vma; | 149 | struct vm_area_struct *vma; |
183 | unsigned long addr, text_start; | 150 | unsigned long text_start; |
184 | int ret = 0; | 151 | int ret = 0; |
185 | 152 | ||
186 | static const struct vm_special_mapping vdso_mapping = { | ||
187 | .name = "[vdso]", | ||
188 | .fault = vdso_fault, | ||
189 | .mremap = vdso_mremap, | ||
190 | }; | ||
191 | static const struct vm_special_mapping vvar_mapping = { | ||
192 | .name = "[vvar]", | ||
193 | .fault = vvar_fault, | ||
194 | }; | ||
195 | |||
196 | if (calculate_addr) { | ||
197 | addr = vdso_addr(current->mm->start_stack, | ||
198 | image->size - image->sym_vvar_start); | ||
199 | } else { | ||
200 | addr = 0; | ||
201 | } | ||
202 | |||
203 | if (down_write_killable(&mm->mmap_sem)) | 153 | if (down_write_killable(&mm->mmap_sem)) |
204 | return -EINTR; | 154 | return -EINTR; |
205 | 155 | ||
@@ -238,24 +188,104 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr) | |||
238 | 188 | ||
239 | if (IS_ERR(vma)) { | 189 | if (IS_ERR(vma)) { |
240 | ret = PTR_ERR(vma); | 190 | ret = PTR_ERR(vma); |
241 | goto up_fail; | 191 | do_munmap(mm, text_start, image->size); |
242 | } | 192 | } |
243 | 193 | ||
244 | up_fail: | 194 | up_fail: |
245 | if (ret) | 195 | if (ret) { |
246 | current->mm->context.vdso = NULL; | 196 | current->mm->context.vdso = NULL; |
197 | current->mm->context.vdso_image = NULL; | ||
198 | } | ||
247 | 199 | ||
248 | up_write(&mm->mmap_sem); | 200 | up_write(&mm->mmap_sem); |
249 | return ret; | 201 | return ret; |
250 | } | 202 | } |
251 | 203 | ||
204 | #ifdef CONFIG_X86_64 | ||
205 | /* | ||
206 | * Put the vdso above the (randomized) stack with another randomized | ||
207 | * offset. This way there is no hole in the middle of address space. | ||
208 | * To save memory make sure it is still in the same PTE as the stack | ||
209 | * top. This doesn't give that many random bits. | ||
210 | * | ||
211 | * Note that this algorithm is imperfect: the distribution of the vdso | ||
212 | * start address within a PMD is biased toward the end. | ||
213 | * | ||
214 | * Only used for the 64-bit and x32 vdsos. | ||
215 | */ | ||
216 | static unsigned long vdso_addr(unsigned long start, unsigned len) | ||
217 | { | ||
218 | unsigned long addr, end; | ||
219 | unsigned offset; | ||
220 | |||
221 | /* | ||
222 | * Round up the start address. It can start out unaligned as a result | ||
223 | * of stack start randomization. | ||
224 | */ | ||
225 | start = PAGE_ALIGN(start); | ||
226 | |||
227 | /* Round the lowest possible end address up to a PMD boundary. */ | ||
228 | end = (start + len + PMD_SIZE - 1) & PMD_MASK; | ||
229 | if (end >= TASK_SIZE_MAX) | ||
230 | end = TASK_SIZE_MAX; | ||
231 | end -= len; | ||
232 | |||
233 | if (end > start) { | ||
234 | offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); | ||
235 | addr = start + (offset << PAGE_SHIFT); | ||
236 | } else { | ||
237 | addr = start; | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * Forcibly align the final address in case we have a hardware | ||
242 | * issue that requires alignment for performance reasons. | ||
243 | */ | ||
244 | addr = align_vdso_addr(addr); | ||
245 | |||
246 | return addr; | ||
247 | } | ||
248 | |||
249 | static int map_vdso_randomized(const struct vdso_image *image) | ||
250 | { | ||
251 | unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start); | ||
252 | |||
253 | return map_vdso(image, addr); | ||
254 | } | ||
255 | #endif | ||
256 | |||
257 | int map_vdso_once(const struct vdso_image *image, unsigned long addr) | ||
258 | { | ||
259 | struct mm_struct *mm = current->mm; | ||
260 | struct vm_area_struct *vma; | ||
261 | |||
262 | down_write(&mm->mmap_sem); | ||
263 | /* | ||
264 | * Check if we have already mapped vdso blob - fail to prevent | ||
265 | * abusing from userspace install_speciall_mapping, which may | ||
266 | * not do accounting and rlimit right. | ||
267 | * We could search vma near context.vdso, but it's a slowpath, | ||
268 | * so let's explicitely check all VMAs to be completely sure. | ||
269 | */ | ||
270 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | ||
271 | if (vma_is_special_mapping(vma, &vdso_mapping) || | ||
272 | vma_is_special_mapping(vma, &vvar_mapping)) { | ||
273 | up_write(&mm->mmap_sem); | ||
274 | return -EEXIST; | ||
275 | } | ||
276 | } | ||
277 | up_write(&mm->mmap_sem); | ||
278 | |||
279 | return map_vdso(image, addr); | ||
280 | } | ||
281 | |||
252 | #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) | 282 | #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) |
253 | static int load_vdso32(void) | 283 | static int load_vdso32(void) |
254 | { | 284 | { |
255 | if (vdso32_enabled != 1) /* Other values all mean "disabled" */ | 285 | if (vdso32_enabled != 1) /* Other values all mean "disabled" */ |
256 | return 0; | 286 | return 0; |
257 | 287 | ||
258 | return map_vdso(&vdso_image_32, false); | 288 | return map_vdso(&vdso_image_32, 0); |
259 | } | 289 | } |
260 | #endif | 290 | #endif |
261 | 291 | ||
@@ -265,7 +295,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
265 | if (!vdso64_enabled) | 295 | if (!vdso64_enabled) |
266 | return 0; | 296 | return 0; |
267 | 297 | ||
268 | return map_vdso(&vdso_image_64, true); | 298 | return map_vdso_randomized(&vdso_image_64); |
269 | } | 299 | } |
270 | 300 | ||
271 | #ifdef CONFIG_COMPAT | 301 | #ifdef CONFIG_COMPAT |
@@ -276,8 +306,7 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm, | |||
276 | if (test_thread_flag(TIF_X32)) { | 306 | if (test_thread_flag(TIF_X32)) { |
277 | if (!vdso64_enabled) | 307 | if (!vdso64_enabled) |
278 | return 0; | 308 | return 0; |
279 | 309 | return map_vdso_randomized(&vdso_image_x32); | |
280 | return map_vdso(&vdso_image_x32, true); | ||
281 | } | 310 | } |
282 | #endif | 311 | #endif |
283 | #ifdef CONFIG_IA32_EMULATION | 312 | #ifdef CONFIG_IA32_EMULATION |
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 2f29f4e407c3..cb13c0564ea7 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c | |||
@@ -378,7 +378,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig, | |||
378 | put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); | 378 | put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); |
379 | } put_user_catch(err); | 379 | } put_user_catch(err); |
380 | 380 | ||
381 | err |= copy_siginfo_to_user32(&frame->info, &ksig->info); | 381 | err |= __copy_siginfo_to_user32(&frame->info, &ksig->info, false); |
382 | err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, | 382 | err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, |
383 | regs, set->sig[0]); | 383 | regs, set->sig[0]); |
384 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 384 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index a18806165fe4..03d269bed941 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h | |||
@@ -275,10 +275,10 @@ struct compat_shmid64_ds { | |||
275 | #ifdef CONFIG_X86_X32_ABI | 275 | #ifdef CONFIG_X86_X32_ABI |
276 | typedef struct user_regs_struct compat_elf_gregset_t; | 276 | typedef struct user_regs_struct compat_elf_gregset_t; |
277 | 277 | ||
278 | #define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216) | 278 | /* Full regset -- prstatus on x32, otherwise on ia32 */ |
279 | #define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296) | 279 | #define PRSTATUS_SIZE(S, R) (R != sizeof(S.pr_reg) ? 144 : 296) |
280 | #define SET_PR_FPVALID(S,V) \ | 280 | #define SET_PR_FPVALID(S, V, R) \ |
281 | do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \ | 281 | do { *(int *) (((void *) &((S)->pr_reg)) + R) = (V); } \ |
282 | while (0) | 282 | while (0) |
283 | 283 | ||
284 | #define COMPAT_USE_64BIT_TIME \ | 284 | #define COMPAT_USE_64BIT_TIME \ |
diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h index 0e970d00dfcd..20a1fbf7fe4e 100644 --- a/arch/x86/include/asm/fpu/signal.h +++ b/arch/x86/include/asm/fpu/signal.h | |||
@@ -19,6 +19,12 @@ int ia32_setup_frame(int sig, struct ksignal *ksig, | |||
19 | # define ia32_setup_rt_frame __setup_rt_frame | 19 | # define ia32_setup_rt_frame __setup_rt_frame |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifdef CONFIG_COMPAT | ||
23 | int __copy_siginfo_to_user32(compat_siginfo_t __user *to, | ||
24 | const siginfo_t *from, bool x32_ABI); | ||
25 | #endif | ||
26 | |||
27 | |||
22 | extern void convert_from_fxsr(struct user_i387_ia32_struct *env, | 28 | extern void convert_from_fxsr(struct user_i387_ia32_struct *env, |
23 | struct task_struct *tsk); | 29 | struct task_struct *tsk); |
24 | extern void convert_to_fxsr(struct task_struct *tsk, | 30 | extern void convert_to_fxsr(struct task_struct *tsk, |
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index dd1e7d6387ab..8af22be0fe61 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h | |||
@@ -23,6 +23,10 @@ typedef struct { | |||
23 | unsigned long sig[_NSIG_WORDS]; | 23 | unsigned long sig[_NSIG_WORDS]; |
24 | } sigset_t; | 24 | } sigset_t; |
25 | 25 | ||
26 | /* non-uapi in-kernel SA_FLAGS for those indicates ABI for a signal frame */ | ||
27 | #define SA_IA32_ABI 0x02000000u | ||
28 | #define SA_X32_ABI 0x01000000u | ||
29 | |||
26 | #ifndef CONFIG_COMPAT | 30 | #ifndef CONFIG_COMPAT |
27 | typedef sigset_t compat_sigset_t; | 31 | typedef sigset_t compat_sigset_t; |
28 | #endif | 32 | #endif |
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h index 43dc55be524e..2444189cbe28 100644 --- a/arch/x86/include/asm/vdso.h +++ b/arch/x86/include/asm/vdso.h | |||
@@ -41,6 +41,8 @@ extern const struct vdso_image vdso_image_32; | |||
41 | 41 | ||
42 | extern void __init init_vdso_image(const struct vdso_image *image); | 42 | extern void __init init_vdso_image(const struct vdso_image *image); |
43 | 43 | ||
44 | extern int map_vdso_once(const struct vdso_image *image, unsigned long addr); | ||
45 | |||
44 | #endif /* __ASSEMBLER__ */ | 46 | #endif /* __ASSEMBLER__ */ |
45 | 47 | ||
46 | #endif /* _ASM_X86_VDSO_H */ | 48 | #endif /* _ASM_X86_VDSO_H */ |
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h index 3ac5032fae09..ae135de547f5 100644 --- a/arch/x86/include/uapi/asm/prctl.h +++ b/arch/x86/include/uapi/asm/prctl.h | |||
@@ -6,4 +6,10 @@ | |||
6 | #define ARCH_GET_FS 0x1003 | 6 | #define ARCH_GET_FS 0x1003 |
7 | #define ARCH_GET_GS 0x1004 | 7 | #define ARCH_GET_GS 0x1004 |
8 | 8 | ||
9 | #ifdef CONFIG_CHECKPOINT_RESTORE | ||
10 | # define ARCH_MAP_VDSO_X32 0x2001 | ||
11 | # define ARCH_MAP_VDSO_32 0x2002 | ||
12 | # define ARCH_MAP_VDSO_64 0x2003 | ||
13 | #endif | ||
14 | |||
9 | #endif /* _ASM_X86_PRCTL_H */ | 15 | #endif /* _ASM_X86_PRCTL_H */ |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index de9acaf2d371..ee944bd2310d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/debugreg.h> | 49 | #include <asm/debugreg.h> |
50 | #include <asm/switch_to.h> | 50 | #include <asm/switch_to.h> |
51 | #include <asm/xen/hypervisor.h> | 51 | #include <asm/xen/hypervisor.h> |
52 | #include <asm/vdso.h> | ||
52 | 53 | ||
53 | __visible DEFINE_PER_CPU(unsigned long, rsp_scratch); | 54 | __visible DEFINE_PER_CPU(unsigned long, rsp_scratch); |
54 | 55 | ||
@@ -523,6 +524,19 @@ void set_personality_ia32(bool x32) | |||
523 | } | 524 | } |
524 | EXPORT_SYMBOL_GPL(set_personality_ia32); | 525 | EXPORT_SYMBOL_GPL(set_personality_ia32); |
525 | 526 | ||
527 | #ifdef CONFIG_CHECKPOINT_RESTORE | ||
528 | static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr) | ||
529 | { | ||
530 | int ret; | ||
531 | |||
532 | ret = map_vdso_once(image, addr); | ||
533 | if (ret) | ||
534 | return ret; | ||
535 | |||
536 | return (long)image->size; | ||
537 | } | ||
538 | #endif | ||
539 | |||
526 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | 540 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) |
527 | { | 541 | { |
528 | int ret = 0; | 542 | int ret = 0; |
@@ -576,6 +590,19 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | |||
576 | break; | 590 | break; |
577 | } | 591 | } |
578 | 592 | ||
593 | #ifdef CONFIG_CHECKPOINT_RESTORE | ||
594 | # ifdef CONFIG_X86_X32_ABI | ||
595 | case ARCH_MAP_VDSO_X32: | ||
596 | return prctl_map_vdso(&vdso_image_x32, addr); | ||
597 | # endif | ||
598 | # if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION | ||
599 | case ARCH_MAP_VDSO_32: | ||
600 | return prctl_map_vdso(&vdso_image_32, addr); | ||
601 | # endif | ||
602 | case ARCH_MAP_VDSO_64: | ||
603 | return prctl_map_vdso(&vdso_image_64, addr); | ||
604 | #endif | ||
605 | |||
579 | default: | 606 | default: |
580 | ret = -EINVAL; | 607 | ret = -EINVAL; |
581 | break; | 608 | break; |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index ce94c38cf4d6..0e63c0267f99 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -1358,7 +1358,7 @@ void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask) | |||
1358 | const struct user_regset_view *task_user_regset_view(struct task_struct *task) | 1358 | const struct user_regset_view *task_user_regset_view(struct task_struct *task) |
1359 | { | 1359 | { |
1360 | #ifdef CONFIG_IA32_EMULATION | 1360 | #ifdef CONFIG_IA32_EMULATION |
1361 | if (test_tsk_thread_flag(task, TIF_IA32)) | 1361 | if (!user_64bit_mode(task_pt_regs(task))) |
1362 | #endif | 1362 | #endif |
1363 | #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION | 1363 | #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION |
1364 | return &user_x86_32_view; | 1364 | return &user_x86_32_view; |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index da20ecb5397a..763af1d0de64 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <asm/syscalls.h> | 42 | #include <asm/syscalls.h> |
43 | 43 | ||
44 | #include <asm/sigframe.h> | 44 | #include <asm/sigframe.h> |
45 | #include <asm/signal.h> | ||
45 | 46 | ||
46 | #define COPY(x) do { \ | 47 | #define COPY(x) do { \ |
47 | get_user_ex(regs->x, &sc->x); \ | 48 | get_user_ex(regs->x, &sc->x); \ |
@@ -547,7 +548,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig, | |||
547 | return -EFAULT; | 548 | return -EFAULT; |
548 | 549 | ||
549 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) { | 550 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) { |
550 | if (copy_siginfo_to_user32(&frame->info, &ksig->info)) | 551 | if (__copy_siginfo_to_user32(&frame->info, &ksig->info, true)) |
551 | return -EFAULT; | 552 | return -EFAULT; |
552 | } | 553 | } |
553 | 554 | ||
@@ -660,20 +661,21 @@ badframe: | |||
660 | return 0; | 661 | return 0; |
661 | } | 662 | } |
662 | 663 | ||
663 | static inline int is_ia32_compat_frame(void) | 664 | static inline int is_ia32_compat_frame(struct ksignal *ksig) |
664 | { | 665 | { |
665 | return IS_ENABLED(CONFIG_IA32_EMULATION) && | 666 | return IS_ENABLED(CONFIG_IA32_EMULATION) && |
666 | test_thread_flag(TIF_IA32); | 667 | ksig->ka.sa.sa_flags & SA_IA32_ABI; |
667 | } | 668 | } |
668 | 669 | ||
669 | static inline int is_ia32_frame(void) | 670 | static inline int is_ia32_frame(struct ksignal *ksig) |
670 | { | 671 | { |
671 | return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(); | 672 | return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig); |
672 | } | 673 | } |
673 | 674 | ||
674 | static inline int is_x32_frame(void) | 675 | static inline int is_x32_frame(struct ksignal *ksig) |
675 | { | 676 | { |
676 | return IS_ENABLED(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32); | 677 | return IS_ENABLED(CONFIG_X86_X32_ABI) && |
678 | ksig->ka.sa.sa_flags & SA_X32_ABI; | ||
677 | } | 679 | } |
678 | 680 | ||
679 | static int | 681 | static int |
@@ -684,12 +686,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) | |||
684 | compat_sigset_t *cset = (compat_sigset_t *) set; | 686 | compat_sigset_t *cset = (compat_sigset_t *) set; |
685 | 687 | ||
686 | /* Set up the stack frame */ | 688 | /* Set up the stack frame */ |
687 | if (is_ia32_frame()) { | 689 | if (is_ia32_frame(ksig)) { |
688 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) | 690 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
689 | return ia32_setup_rt_frame(usig, ksig, cset, regs); | 691 | return ia32_setup_rt_frame(usig, ksig, cset, regs); |
690 | else | 692 | else |
691 | return ia32_setup_frame(usig, ksig, cset, regs); | 693 | return ia32_setup_frame(usig, ksig, cset, regs); |
692 | } else if (is_x32_frame()) { | 694 | } else if (is_x32_frame(ksig)) { |
693 | return x32_setup_rt_frame(ksig, cset, regs); | 695 | return x32_setup_rt_frame(ksig, cset, regs); |
694 | } else { | 696 | } else { |
695 | return __setup_rt_frame(ksig->sig, ksig, set, regs); | 697 | return __setup_rt_frame(ksig->sig, ksig, set, regs); |
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index b44564bf86a8..40df33753bae 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <linux/compat.h> | 1 | #include <linux/compat.h> |
2 | #include <linux/uaccess.h> | 2 | #include <linux/uaccess.h> |
3 | #include <linux/ptrace.h> | ||
3 | 4 | ||
4 | /* | 5 | /* |
5 | * The compat_siginfo_t structure and handing code is very easy | 6 | * The compat_siginfo_t structure and handing code is very easy |
@@ -92,10 +93,31 @@ static inline void signal_compat_build_tests(void) | |||
92 | /* any new si_fields should be added here */ | 93 | /* any new si_fields should be added here */ |
93 | } | 94 | } |
94 | 95 | ||
95 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) | 96 | void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact) |
97 | { | ||
98 | /* Don't leak in-kernel non-uapi flags to user-space */ | ||
99 | if (oact) | ||
100 | oact->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI); | ||
101 | |||
102 | if (!act) | ||
103 | return; | ||
104 | |||
105 | /* Don't let flags to be set from userspace */ | ||
106 | act->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI); | ||
107 | |||
108 | if (user_64bit_mode(current_pt_regs())) | ||
109 | return; | ||
110 | |||
111 | if (in_ia32_syscall()) | ||
112 | act->sa.sa_flags |= SA_IA32_ABI; | ||
113 | if (in_x32_syscall()) | ||
114 | act->sa.sa_flags |= SA_X32_ABI; | ||
115 | } | ||
116 | |||
117 | int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from, | ||
118 | bool x32_ABI) | ||
96 | { | 119 | { |
97 | int err = 0; | 120 | int err = 0; |
98 | bool ia32 = test_thread_flag(TIF_IA32); | ||
99 | 121 | ||
100 | signal_compat_build_tests(); | 122 | signal_compat_build_tests(); |
101 | 123 | ||
@@ -146,7 +168,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) | |||
146 | put_user_ex(from->si_arch, &to->si_arch); | 168 | put_user_ex(from->si_arch, &to->si_arch); |
147 | break; | 169 | break; |
148 | case __SI_CHLD >> 16: | 170 | case __SI_CHLD >> 16: |
149 | if (ia32) { | 171 | if (!x32_ABI) { |
150 | put_user_ex(from->si_utime, &to->si_utime); | 172 | put_user_ex(from->si_utime, &to->si_utime); |
151 | put_user_ex(from->si_stime, &to->si_stime); | 173 | put_user_ex(from->si_stime, &to->si_stime); |
152 | } else { | 174 | } else { |
@@ -180,6 +202,12 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) | |||
180 | return err; | 202 | return err; |
181 | } | 203 | } |
182 | 204 | ||
205 | /* from syscall's path, where we know the ABI */ | ||
206 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) | ||
207 | { | ||
208 | return __copy_siginfo_to_user32(to, from, in_x32_syscall()); | ||
209 | } | ||
210 | |||
183 | int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | 211 | int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) |
184 | { | 212 | { |
185 | int err = 0; | 213 | int err = 0; |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index e5495f37c6ed..2472af2798c7 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -1624,20 +1624,12 @@ static void do_thread_regset_writeback(struct task_struct *task, | |||
1624 | regset->writeback(task, regset, 1); | 1624 | regset->writeback(task, regset, 1); |
1625 | } | 1625 | } |
1626 | 1626 | ||
1627 | #ifndef PR_REG_SIZE | ||
1628 | #define PR_REG_SIZE(S) sizeof(S) | ||
1629 | #endif | ||
1630 | |||
1631 | #ifndef PRSTATUS_SIZE | 1627 | #ifndef PRSTATUS_SIZE |
1632 | #define PRSTATUS_SIZE(S) sizeof(S) | 1628 | #define PRSTATUS_SIZE(S, R) sizeof(S) |
1633 | #endif | ||
1634 | |||
1635 | #ifndef PR_REG_PTR | ||
1636 | #define PR_REG_PTR(S) (&((S)->pr_reg)) | ||
1637 | #endif | 1629 | #endif |
1638 | 1630 | ||
1639 | #ifndef SET_PR_FPVALID | 1631 | #ifndef SET_PR_FPVALID |
1640 | #define SET_PR_FPVALID(S, V) ((S)->pr_fpvalid = (V)) | 1632 | #define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V)) |
1641 | #endif | 1633 | #endif |
1642 | 1634 | ||
1643 | static int fill_thread_core_info(struct elf_thread_core_info *t, | 1635 | static int fill_thread_core_info(struct elf_thread_core_info *t, |
@@ -1645,6 +1637,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, | |||
1645 | long signr, size_t *total) | 1637 | long signr, size_t *total) |
1646 | { | 1638 | { |
1647 | unsigned int i; | 1639 | unsigned int i; |
1640 | unsigned int regset_size = view->regsets[0].n * view->regsets[0].size; | ||
1648 | 1641 | ||
1649 | /* | 1642 | /* |
1650 | * NT_PRSTATUS is the one special case, because the regset data | 1643 | * NT_PRSTATUS is the one special case, because the regset data |
@@ -1653,12 +1646,11 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, | |||
1653 | * We assume that regset 0 is NT_PRSTATUS. | 1646 | * We assume that regset 0 is NT_PRSTATUS. |
1654 | */ | 1647 | */ |
1655 | fill_prstatus(&t->prstatus, t->task, signr); | 1648 | fill_prstatus(&t->prstatus, t->task, signr); |
1656 | (void) view->regsets[0].get(t->task, &view->regsets[0], | 1649 | (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset_size, |
1657 | 0, PR_REG_SIZE(t->prstatus.pr_reg), | 1650 | &t->prstatus.pr_reg, NULL); |
1658 | PR_REG_PTR(&t->prstatus), NULL); | ||
1659 | 1651 | ||
1660 | fill_note(&t->notes[0], "CORE", NT_PRSTATUS, | 1652 | fill_note(&t->notes[0], "CORE", NT_PRSTATUS, |
1661 | PRSTATUS_SIZE(t->prstatus), &t->prstatus); | 1653 | PRSTATUS_SIZE(t->prstatus, regset_size), &t->prstatus); |
1662 | *total += notesize(&t->notes[0]); | 1654 | *total += notesize(&t->notes[0]); |
1663 | 1655 | ||
1664 | do_thread_regset_writeback(t->task, &view->regsets[0]); | 1656 | do_thread_regset_writeback(t->task, &view->regsets[0]); |
@@ -1688,7 +1680,8 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, | |||
1688 | regset->core_note_type, | 1680 | regset->core_note_type, |
1689 | size, data); | 1681 | size, data); |
1690 | else { | 1682 | else { |
1691 | SET_PR_FPVALID(&t->prstatus, 1); | 1683 | SET_PR_FPVALID(&t->prstatus, |
1684 | 1, regset_size); | ||
1692 | fill_note(&t->notes[i], "CORE", | 1685 | fill_note(&t->notes[i], "CORE", |
1693 | NT_PRFPREG, size, data); | 1686 | NT_PRFPREG, size, data); |
1694 | } | 1687 | } |
diff --git a/include/linux/mm.h b/include/linux/mm.h index ef815b9cd426..5f14534f0c90 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -2019,6 +2019,8 @@ extern struct file *get_task_exe_file(struct task_struct *task); | |||
2019 | extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); | 2019 | extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); |
2020 | extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); | 2020 | extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); |
2021 | 2021 | ||
2022 | extern bool vma_is_special_mapping(const struct vm_area_struct *vma, | ||
2023 | const struct vm_special_mapping *sm); | ||
2022 | extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, | 2024 | extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, |
2023 | unsigned long addr, unsigned long len, | 2025 | unsigned long addr, unsigned long len, |
2024 | unsigned long flags, | 2026 | unsigned long flags, |
diff --git a/kernel/signal.c b/kernel/signal.c index af21afc00d08..75761acc77cf 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -3044,6 +3044,11 @@ void kernel_sigaction(int sig, __sighandler_t action) | |||
3044 | } | 3044 | } |
3045 | EXPORT_SYMBOL(kernel_sigaction); | 3045 | EXPORT_SYMBOL(kernel_sigaction); |
3046 | 3046 | ||
3047 | void __weak sigaction_compat_abi(struct k_sigaction *act, | ||
3048 | struct k_sigaction *oact) | ||
3049 | { | ||
3050 | } | ||
3051 | |||
3047 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | 3052 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
3048 | { | 3053 | { |
3049 | struct task_struct *p = current, *t; | 3054 | struct task_struct *p = current, *t; |
@@ -3059,6 +3064,8 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | |||
3059 | if (oact) | 3064 | if (oact) |
3060 | *oact = *k; | 3065 | *oact = *k; |
3061 | 3066 | ||
3067 | sigaction_compat_abi(act, oact); | ||
3068 | |||
3062 | if (act) { | 3069 | if (act) { |
3063 | sigdelsetmask(&act->sa.sa_mask, | 3070 | sigdelsetmask(&act->sa.sa_mask, |
3064 | sigmask(SIGKILL) | sigmask(SIGSTOP)); | 3071 | sigmask(SIGKILL) | sigmask(SIGSTOP)); |
@@ -3068,6 +3068,14 @@ out: | |||
3068 | return ERR_PTR(ret); | 3068 | return ERR_PTR(ret); |
3069 | } | 3069 | } |
3070 | 3070 | ||
3071 | bool vma_is_special_mapping(const struct vm_area_struct *vma, | ||
3072 | const struct vm_special_mapping *sm) | ||
3073 | { | ||
3074 | return vma->vm_private_data == sm && | ||
3075 | (vma->vm_ops == &special_mapping_vmops || | ||
3076 | vma->vm_ops == &legacy_special_mapping_vmops); | ||
3077 | } | ||
3078 | |||
3071 | /* | 3079 | /* |
3072 | * Called with mm->mmap_sem held for writing. | 3080 | * Called with mm->mmap_sem held for writing. |
3073 | * Insert a new vma covering the given region, with the given flags. | 3081 | * Insert a new vma covering the given region, with the given flags. |