path: root/arch/x86/vdso
author	Andy Lutomirski <luto@amacapital.net>	2014-05-19 18:58:33 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2014-05-20 14:38:42 -0400
commit	a62c34bd2a8a3f159945becd57401e478818d51c (patch)
tree	8721aca251b468606e52376fc811dd0c8beeaeb8 /arch/x86/vdso
parent	78d683e838a60ec4ba4591cca4364cba84a9e626 (diff)
x86, mm: Improve _install_special_mapping and fix x86 vdso naming
Using arch_vma_name to give special mappings a name is awkward. x86
currently implements it by comparing the start address of the vma to the
expected address of the vdso. This requires tracking the start address of
special mappings and is probably buggy if a special vma is split or moved.

Improve _install_special_mapping to just name the vma directly. Use it to
give the x86 vvar area a name, which should make CRIU's life easier.

As a side effect, the vvar area will show up in core dumps. This could be
considered weird and is fixable.

[hpa: I say we accept this as-is but be prepared to deal with knocking out
 the vvars from core dumps if this becomes a problem.]

Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/276b39b6b645fb11e345457b503f17b83c2c6fd0.1400538962.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
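[Editor's note: a minimal sketch of the pattern this patch enables, mirroring the usage visible in the vma.c hunk below. The helper map_vvar_example and its parameters are hypothetical; the struct vm_special_mapping fields, _install_special_mapping signature, and ERR_PTR handling follow what the diff itself shows.]

	#include <linux/err.h>
	#include <linux/mm.h>

	/* The vma is named directly via the mapping descriptor, so no
	 * arch_vma_name() hook is needed. */
	static struct page *no_pages[] = {NULL};

	static struct vm_special_mapping vvar_mapping = {
		.name  = "[vvar]",	/* appears as "[vvar]" in /proc/<pid>/maps */
		.pages = no_pages,
	};

	/* Hypothetical helper, for illustration only. */
	static int map_vvar_example(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
	{
		struct vm_area_struct *vma;

		/* Returns the new vma on success or an ERR_PTR() on failure. */
		vma = _install_special_mapping(mm, addr, len, VM_READ,
					       &vvar_mapping);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		return 0;
	}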
Diffstat (limited to 'arch/x86/vdso')
-rw-r--r--	arch/x86/vdso/vdso2c.h		5
-rw-r--r--	arch/x86/vdso/vdso32-setup.c	7
-rw-r--r--	arch/x86/vdso/vma.c		25
3 files changed, 20 insertions(+), 17 deletions(-)
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index ed2e894e89ab..3dcc61e796e9 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -136,7 +136,10 @@ static int GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 	fprintf(outfile, "const struct vdso_image %s = {\n", name);
 	fprintf(outfile, "\t.data = raw_data,\n");
 	fprintf(outfile, "\t.size = %lu,\n", data_size);
-	fprintf(outfile, "\t.pages = pages,\n");
+	fprintf(outfile, "\t.text_mapping = {\n");
+	fprintf(outfile, "\t\t.name = \"[vdso]\",\n");
+	fprintf(outfile, "\t\t.pages = pages,\n");
+	fprintf(outfile, "\t},\n");
 	if (alt_sec) {
 		fprintf(outfile, "\t.alt = %lu,\n",
 			(unsigned long)alt_sec->sh_offset);
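[Editor's note: roughly what the generated initializer emitted by these fprintf() calls looks like after this change; the image name vdso_image_64 and the size value are illustrative, not taken from the patch.]

	const struct vdso_image vdso_image_64 = {
		.data = raw_data,
		.size = 8192,
		.text_mapping = {
			.name = "[vdso]",
			.pages = pages,
		},
		/* .alt follows when an alternatives section is present */
	};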
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index c3ed708e50f4..e4f7781ee162 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -119,13 +119,6 @@ __initcall(ia32_binfmt_init);
 
 #else /* CONFIG_X86_32 */
 
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
-		return "[vdso]";
-	return NULL;
-}
-
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 8ad0081df7a8..e1513c47872a 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -30,7 +30,8 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 	BUG_ON(image->size % PAGE_SIZE != 0);
 	for (i = 0; i < npages; i++)
-		image->pages[i] = virt_to_page(image->data + i*PAGE_SIZE);
+		image->text_mapping.pages[i] =
+			virt_to_page(image->data + i*PAGE_SIZE);
 
 	apply_alternatives((struct alt_instr *)(image->data + image->alt),
 			   (struct alt_instr *)(image->data + image->alt +
@@ -91,6 +92,10 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 	unsigned long addr;
 	int ret = 0;
 	static struct page *no_pages[] = {NULL};
+	static struct vm_special_mapping vvar_mapping = {
+		.name = "[vvar]",
+		.pages = no_pages,
+	};
 
 	if (calculate_addr) {
 		addr = vdso_addr(current->mm->start_stack,
@@ -112,21 +117,23 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 	/*
 	 * MAYWRITE to allow gdb to COW and set breakpoints
 	 */
-	ret = install_special_mapping(mm,
-				      addr,
-				      image->size,
-				      VM_READ|VM_EXEC|
-				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				      image->pages);
+	vma = _install_special_mapping(mm,
+				       addr,
+				       image->size,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &image->text_mapping);
 
-	if (ret)
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto up_fail;
+	}
 
 	vma = _install_special_mapping(mm,
 				       addr + image->size,
 				       image->sym_end_mapping - image->size,
 				       VM_READ,
-				       no_pages);
+				       &vvar_mapping);
 
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);