From 8715493852783358ef8656a0054a14bf822509cf Mon Sep 17 00:00:00 2001
From: Will Deacon <will.deacon@arm.com>
Date: Wed, 9 Jul 2014 19:22:11 +0100
Subject: arm64: vdso: put vdso datapage in a separate vma

The VDSO datapage doesn't need to be executable (no code there) or
CoW-able (the kernel writes the page, so a private copy is totally
useless). This patch moves the datapage into its own VMA, identified as
"[vvar]" in /proc/<pid>/maps.

Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/kernel/vdso.c | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)

(limited to 'arch/arm64/kernel/vdso.c')

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 50384fec56c4..84cafbc3eb54 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -138,11 +138,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 				int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long vdso_base, vdso_mapping_len;
+	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
 	int ret;
 
+	vdso_text_len = vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
-	vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
 	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
@@ -152,35 +153,52 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	}
 	mm->context.vdso = (void *)vdso_base;
 
-	ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
+	ret = install_special_mapping(mm, vdso_base, vdso_text_len,
 				      VM_READ|VM_EXEC|
 				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				      vdso_pagelist);
-	if (ret) {
-		mm->context.vdso = NULL;
+	if (ret)
+		goto up_fail;
+
+	vdso_base += vdso_text_len;
+	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				      VM_READ|VM_MAYREAD,
+				      vdso_pagelist + vdso_pages);
+	if (ret)
 		goto up_fail;
-	}
 
-up_fail:
 	up_write(&mm->mmap_sem);
 
+	return 0;
+up_fail:
+	mm->context.vdso = NULL;
+	up_write(&mm->mmap_sem);
 	return ret;
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
+	unsigned long vdso_text;
+
+	if (!vma->vm_mm)
+		return NULL;
+
+	vdso_text = (unsigned long)vma->vm_mm->context.vdso;
+
 	/*
 	 * We can re-use the vdso pointer in mm_context_t for identifying
 	 * the vectors page for compat applications. The vDSO will always
 	 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
 	 * it conflicting with the vectors base.
 	 */
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
+	if (vma->vm_start == vdso_text) {
 #ifdef CONFIG_COMPAT
 		if (vma->vm_start == AARCH32_VECTORS_BASE)
 			return "[vectors]";
 #endif
 		return "[vdso]";
+	} else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
+		return "[vvar]";
 	}
 
 	return NULL;
--
cgit v1.2.2
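
A quick way to see the result of this patch from userspace is to look for
the new "[vvar]" entry in /proc/self/maps alongside "[vdso]". The test
program below is an illustrative sketch (not part of the series); on a
kernel carrying this patch the [vvar] line should show r--p permissions,
while [vdso] stays r-xp:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps) {
		perror("fopen");
		return 1;
	}

	/* Each line: start-end perms offset dev inode pathname */
	while (fgets(line, sizeof(line), maps))
		if (strstr(line, "[vvar]") || strstr(line, "[vdso]"))
			fputs(line, stdout);

	fclose(maps);
	return 0;
}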
From 2fea7f6c98f5957e539eb8aa0ce849729b900342 Mon Sep 17 00:00:00 2001
From: Will Deacon <will.deacon@arm.com>
Date: Wed, 9 Jul 2014 19:22:12 +0100
Subject: arm64: vdso: move to _install_special_mapping and remove arch_vma_name

_install_special_mapping replaces install_special_mapping and removes
the need to detect special VMA in arch_vma_name.

This patch moves the vdso and compat vectors page code over to the new
API.

Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/kernel/vdso.c | 80 +++++++++++++++++++++++-----------------------------------------------
 1 file changed, 35 insertions(+), 45 deletions(-)

(limited to 'arch/arm64/kernel/vdso.c')

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 84cafbc3eb54..60ae12087d9f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -88,22 +88,29 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = AARCH32_VECTORS_BASE;
-	int ret;
+	static struct vm_special_mapping spec = {
+		.name	= "[vectors]",
+		.pages	= vectors_page,
+
+	};
+	void *ret;
 
 	down_write(&mm->mmap_sem);
 	current->mm->context.vdso = (void *)addr;
 
 	/* Map vectors page at the high address. */
-	ret = install_special_mapping(mm, addr, PAGE_SIZE,
-				      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
-				      vectors_page);
+	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
+				       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+				       &spec);
 
 	up_write(&mm->mmap_sem);
 
-	return ret;
+	return PTR_ERR_OR_ZERO(ret);
 }
 #endif /* CONFIG_COMPAT */
 
+static struct vm_special_mapping vdso_spec[2];
+
 static int __init vdso_init(void)
 {
 	int i;
@@ -130,6 +137,17 @@ static int __init vdso_init(void)
 	/* Grab the vDSO data page. */
 	vdso_pagelist[i] = virt_to_page(vdso_data);
 
+	/* Populate the special mapping structures */
+	vdso_spec[0] = (struct vm_special_mapping) {
+		.name	= "[vdso]",
+		.pages	= vdso_pagelist,
+	};
+
+	vdso_spec[1] = (struct vm_special_mapping) {
+		.name	= "[vvar]",
+		.pages	= vdso_pagelist + vdso_pages,
+	};
+
 	return 0;
 }
 arch_initcall(vdso_init);
@@ -139,7 +157,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
-	int ret;
+	void *ret;
 
 	vdso_text_len = vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
@@ -148,23 +166,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
-		ret = vdso_base;
+		ret = ERR_PTR(vdso_base);
 		goto up_fail;
 	}
 	mm->context.vdso = (void *)vdso_base;
 
-	ret = install_special_mapping(mm, vdso_base, vdso_text_len,
-				      VM_READ|VM_EXEC|
-				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				      vdso_pagelist);
-	if (ret)
+	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &vdso_spec[0]);
+	if (IS_ERR(ret))
 		goto up_fail;
 
 	vdso_base += vdso_text_len;
-	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
-				      VM_READ|VM_MAYREAD,
-				      vdso_pagelist + vdso_pages);
-	if (ret)
+	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD,
+				       &vdso_spec[1]);
+	if (IS_ERR(ret))
 		goto up_fail;
 
 	up_write(&mm->mmap_sem);
@@ -173,35 +191,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 up_fail:
 	mm->context.vdso = NULL;
 	up_write(&mm->mmap_sem);
-	return ret;
-}
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	unsigned long vdso_text;
-
-	if (!vma->vm_mm)
-		return NULL;
-
-	vdso_text = (unsigned long)vma->vm_mm->context.vdso;
-
-	/*
-	 * We can re-use the vdso pointer in mm_context_t for identifying
-	 * the vectors page for compat applications. The vDSO will always
-	 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
-	 * it conflicting with the vectors base.
-	 */
-	if (vma->vm_start == vdso_text) {
-#ifdef CONFIG_COMPAT
-		if (vma->vm_start == AARCH32_VECTORS_BASE)
-			return "[vectors]";
-#endif
-		return "[vdso]";
-	} else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) {
-		return "[vvar]";
-	}
-
-	return NULL;
+	return PTR_ERR(ret);
 }
 
 /*
--
cgit v1.2.2
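
For context: unlike install_special_mapping, which returns an int,
_install_special_mapping returns the new struct vm_area_struct * or an
ERR_PTR() value, which is why the callers above switch to IS_ERR(),
PTR_ERR() and PTR_ERR_OR_ZERO(). The VMA name now also comes from the
vm_special_mapping descriptor itself, so the core mm code can print it
in /proc/<pid>/maps and the per-arch arch_vma_name() hook becomes
redundant. A sketch of the calling convention follows, using a
hypothetical "[example]" mapping and page array (illustrative only, not
from the patch):

#include <linux/err.h>
#include <linux/mm.h>

static struct page *example_pages[1];	/* hypothetical, filled at init time */

static struct vm_special_mapping example_spec = {
	.name	= "[example]",		/* shown verbatim in /proc/<pid>/maps */
	.pages	= example_pages,
};

static int example_map(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ|VM_MAYREAD, &example_spec);

	/* Failure comes back as an ERR_PTR, never as NULL. */
	return PTR_ERR_OR_ZERO(vma);
}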
From 601255ae3c98fdeeee3a8bb4696425e4f868b4f1 Mon Sep 17 00:00:00 2001
From: Will Deacon <will.deacon@arm.com>
Date: Wed, 9 Jul 2014 19:22:13 +0100
Subject: arm64: vdso: move data page before code pages

Andy pointed out that binutils generates additional sections in the
vdso image (e.g. section string table) which, if our .text section
gets big enough, could cross a page boundary and end up screwing up
the location where the kernel expects to put the data page.

This patch solves the issue in the same manner as x86_32, by moving
the data page before the code pages.

Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/kernel/vdso.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

(limited to 'arch/arm64/kernel/vdso.c')

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 60ae12087d9f..24f2e8c62479 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -121,8 +121,8 @@ static int __init vdso_init(void)
 	}
 
 	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
-		vdso_pages + 1, vdso_pages, 1L, &vdso_start);
+	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+		vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
 
 	/* Allocate the vDSO pagelist, plus a page for the data. */
 	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -130,22 +130,22 @@ static int __init vdso_init(void)
 	if (vdso_pagelist == NULL)
 		return -ENOMEM;
 
+	/* Grab the vDSO data page. */
+	vdso_pagelist[0] = virt_to_page(vdso_data);
+
 	/* Grab the vDSO code pages. */
 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
-
-	/* Grab the vDSO data page. */
-	vdso_pagelist[i] = virt_to_page(vdso_data);
+		vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
 
 	/* Populate the special mapping structures */
 	vdso_spec[0] = (struct vm_special_mapping) {
-		.name	= "[vdso]",
+		.name	= "[vvar]",
 		.pages	= vdso_pagelist,
 	};
 
 	vdso_spec[1] = (struct vm_special_mapping) {
-		.name	= "[vvar]",
-		.pages	= vdso_pagelist + vdso_pages,
+		.name	= "[vdso]",
+		.pages	= &vdso_pagelist[1],
 	};
 
 	return 0;
@@ -169,22 +169,22 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		ret = ERR_PTR(vdso_base);
 		goto up_fail;
 	}
-	mm->context.vdso = (void *)vdso_base;
-
-	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
-				       VM_READ|VM_EXEC|
-				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD,
 				       &vdso_spec[0]);
 	if (IS_ERR(ret))
 		goto up_fail;
 
-	vdso_base += vdso_text_len;
-	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
-				       VM_READ|VM_MAYREAD,
+	vdso_base += PAGE_SIZE;
+	mm->context.vdso = (void *)vdso_base;
+	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &vdso_spec[1]);
 	if (IS_ERR(ret))
 		goto up_fail;
 
+
 	up_write(&mm->mmap_sem);
 
 	return 0;
--
cgit v1.2.2
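
The net effect is that the mapping order flips: [vvar] now occupies the
first page at vdso_base and the [vdso] text follows it, with
mm->context.vdso pointing at the text. Because the data page sits exactly
one page below the start of the code, the vDSO can locate it at a constant
negative offset from its own text (in the actual vDSO this is typically
done with a PC-relative reference resolved at link time). A hypothetical
helper illustrating just the arithmetic, assuming the arm64 struct
vdso_data from asm/vdso_datapage.h:

#include <linux/mm.h>		/* PAGE_SIZE */
#include <asm/vdso_datapage.h>	/* struct vdso_data */

/* Illustrative only, not from the patch. */
static inline struct vdso_data *vdso_data_of(unsigned long vdso_text_start)
{
	/* The [vvar] data page is mapped immediately below the [vdso] text. */
	return (struct vdso_data *)(vdso_text_start - PAGE_SIZE);
}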