author     Dmitry Safonov <dsafonov@virtuozzo.com>   2016-09-05 09:33:05 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2016-09-14 15:28:09 -0400
commit     2eefd8789698e89c4a5d610921dc3c1b66e3bd0d (patch)
tree       f9f1fe811f7f2c5395b4c5ba2bb9d6b54ea026cd
parent     576ebfefd37bd41e965787f60684c8e4b7f79457 (diff)
x86/arch_prctl/vdso: Add ARCH_MAP_VDSO_*
Add an API to change the vdso blob type with arch_prctl.
As this is useful only for the needs of CRIU, expose
this interface under CONFIG_CHECKPOINT_RESTORE.
Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: 0x7f454c46@gmail.com
Cc: oleg@redhat.com
Cc: linux-mm@kvack.org
Cc: gorcunov@openvz.org
Cc: xemul@virtuozzo.com
Link: http://lkml.kernel.org/r/20160905133308.28234-4-dsafonov@virtuozzo.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/entry/vdso/vma.c          45
-rw-r--r--  arch/x86/include/asm/vdso.h         2
-rw-r--r--  arch/x86/include/uapi/asm/prctl.h   6
-rw-r--r--  arch/x86/kernel/process_64.c       25
-rw-r--r--  include/linux/mm.h                  2
-rw-r--r--  mm/mmap.c                           8
6 files changed, 78 insertions, 10 deletions
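
For context, here is a minimal userspace sketch (not part of this patch) of how a CRIU-like restorer might exercise the new interface. It assumes a kernel built with CONFIG_CHECKPOINT_RESTORE and takes the ARCH_MAP_VDSO_64 value from the patched uapi header; the address used is an illustrative placeholder.

/*
 * Hypothetical example, not from this patch: ask the kernel to map the
 * 64-bit vDSO blob at a chosen address via the new arch_prctl codes.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef ARCH_MAP_VDSO_64
# define ARCH_MAP_VDSO_64 0x2003	/* value from the patched uapi header */
#endif

int main(void)
{
	/* Page-aligned address assumed to be free in this process. */
	unsigned long addr = 0x7f0000000000UL;

	/*
	 * map_vdso_once() refuses to map a second vDSO: if the original
	 * [vdso]/[vvar] mappings are still present this fails with EEXIST,
	 * so a restorer would unmap them first (e.g. with munmap()).
	 */
	long ret = syscall(SYS_arch_prctl, ARCH_MAP_VDSO_64, addr);
	if (ret < 0) {
		perror("arch_prctl(ARCH_MAP_VDSO_64)");
		return 1;
	}

	/* On success the prctl returns the size of the mapped vDSO image. */
	printf("vDSO of %ld bytes mapped at %#lx\n", ret, addr);
	return 0;
}

The positive return value comes from prctl_map_vdso() returning image->size; errors from map_vdso_once(), such as -EEXIST, surface as -1 with errno set when going through the libc syscall() wrapper.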
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 5bcb25a9e573..4459e73e234d 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -176,6 +176,16 @@ static int vvar_fault(const struct vm_special_mapping *sm,
 	return VM_FAULT_SIGBUS;
 }
 
+static const struct vm_special_mapping vdso_mapping = {
+	.name = "[vdso]",
+	.fault = vdso_fault,
+	.mremap = vdso_mremap,
+};
+static const struct vm_special_mapping vvar_mapping = {
+	.name = "[vvar]",
+	.fault = vvar_fault,
+};
+
 /*
  * Add vdso and vvar mappings to current process.
  * @image - blob to map
@@ -188,16 +198,6 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 	unsigned long text_start;
 	int ret = 0;
 
-	static const struct vm_special_mapping vdso_mapping = {
-		.name = "[vdso]",
-		.fault = vdso_fault,
-		.mremap = vdso_mremap,
-	};
-	static const struct vm_special_mapping vvar_mapping = {
-		.name = "[vvar]",
-		.fault = vvar_fault,
-	};
-
 	if (down_write_killable(&mm->mmap_sem))
 		return -EINTR;
 
@@ -256,6 +256,31 @@ static int map_vdso_randomized(const struct vdso_image *image)
 	return map_vdso(image, addr);
 }
 
+int map_vdso_once(const struct vdso_image *image, unsigned long addr)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+
+	down_write(&mm->mmap_sem);
+	/*
+	 * Check if we have already mapped vdso blob - fail to prevent
+	 * abusing from userspace install_speciall_mapping, which may
+	 * not do accounting and rlimit right.
+	 * We could search vma near context.vdso, but it's a slowpath,
+	 * so let's explicitely check all VMAs to be completely sure.
+	 */
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (vma_is_special_mapping(vma, &vdso_mapping) ||
+				vma_is_special_mapping(vma, &vvar_mapping)) {
+			up_write(&mm->mmap_sem);
+			return -EEXIST;
+		}
+	}
+	up_write(&mm->mmap_sem);
+
+	return map_vdso(image, addr);
+}
+
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 static int load_vdso32(void)
 {
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 43dc55be524e..2444189cbe28 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -41,6 +41,8 @@ extern const struct vdso_image vdso_image_32;
 
 extern void __init init_vdso_image(const struct vdso_image *image);
 
+extern int map_vdso_once(const struct vdso_image *image, unsigned long addr);
+
 #endif /* __ASSEMBLER__ */
 
 #endif /* _ASM_X86_VDSO_H */
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
index 3ac5032fae09..ae135de547f5 100644
--- a/arch/x86/include/uapi/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
@@ -6,4 +6,10 @@
 #define ARCH_GET_FS 0x1003
 #define ARCH_GET_GS 0x1004
 
+#ifdef CONFIG_CHECKPOINT_RESTORE
+# define ARCH_MAP_VDSO_X32	0x2001
+# define ARCH_MAP_VDSO_32	0x2002
+# define ARCH_MAP_VDSO_64	0x2003
+#endif
+
 #endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 63236d8f84bf..f240a465920b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/vdso.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -524,6 +525,17 @@ void set_personality_ia32(bool x32)
 }
 EXPORT_SYMBOL_GPL(set_personality_ia32);
 
+static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
+{
+	int ret;
+
+	ret = map_vdso_once(image, addr);
+	if (ret)
+		return ret;
+
+	return (long)image->size;
+}
+
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
 	int ret = 0;
@@ -577,6 +589,19 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		break;
 	}
 
+#ifdef CONFIG_CHECKPOINT_RESTORE
+#ifdef CONFIG_X86_X32
+	case ARCH_MAP_VDSO_X32:
+		return prctl_map_vdso(&vdso_image_x32, addr);
+#endif
+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
+	case ARCH_MAP_VDSO_32:
+		return prctl_map_vdso(&vdso_image_32, addr);
+#endif
+	case ARCH_MAP_VDSO_64:
+		return prctl_map_vdso(&vdso_image_64, addr);
+#endif
+
 	default:
 		ret = -EINVAL;
 		break;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ef815b9cd426..5f14534f0c90 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2019,6 +2019,8 @@ extern struct file *get_task_exe_file(struct task_struct *task);
 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
 
+extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
+				   const struct vm_special_mapping *sm);
 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
 				   unsigned long addr, unsigned long len,
 				   unsigned long flags,
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3063,6 +3063,14 @@ out:
 	return ERR_PTR(ret);
 }
 
+bool vma_is_special_mapping(const struct vm_area_struct *vma,
+	const struct vm_special_mapping *sm)
+{
+	return vma->vm_private_data == sm &&
+		(vma->vm_ops == &special_mapping_vmops ||
+		 vma->vm_ops == &legacy_special_mapping_vmops);
+}
+
 /*
  * Called with mm->mmap_sem held for writing.
  * Insert a new vma covering the given region, with the given flags.