author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 15:31:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 15:31:53 -0400
commit		5167d09ffad5b16b574d35ce3047ed34caf1e837 (patch)
tree		fc45dd9cbd578f5010e7b8208ecdfc6534547989 /arch/arm64/kernel/vdso.c
parent		8533ce72718871fb528d853391746f36243273af (diff)
parent		ea1719672f59eeb85829073b567495c4f472ac9f (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "Once again, Catalin's off on holiday and I'm looking after the arm64
  tree. Please can you pull the following arm64 updates for 3.17?

  Note that this branch also includes the new GICv3 driver (merged via
  a stable tag from Jason's irqchip tree), since there is a fix for
  older binutils on top.

  Changes include:

   - context tracking support (NO_HZ_FULL) which narrowly missed 3.16
   - vDSO layout rework following Andy's work on x86
   - TEXT_OFFSET fuzzing for bootloader testing
   - /proc/cpuinfo tidy-up
   - preliminary work to support 48-bit virtual addresses, but this is
     currently disabled until KVM has been ported to use it (the patches
     do, however, bring some nice clean-up)
   - boot-time CPU sanity checks (especially useful on heterogenous
     systems)
   - support for syscall auditing
   - support for CC_STACKPROTECTOR
   - defconfig updates"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (55 commits)
  arm64: add newline to I-cache policy string
  Revert "arm64: dmi: Add SMBIOS/DMI support"
  arm64: fpsimd: fix a typo in fpsimd_save_partial_state ENDPROC
  arm64: don't call break hooks for BRK exceptions from EL0
  arm64: defconfig: enable devtmpfs mount option
  arm64: vdso: fix build error when switching from LE to BE
  arm64: defconfig: add virtio support for running as a kvm guest
  arm64: gicv3: Allow GICv3 compilation with older binutils
  arm64: fix soft lockup due to large tlb flush range
  arm64/crypto: fix makefile rule for aes-glue-%.o
  arm64: Do not invoke audit_syscall_* functions if !CONFIG_AUDIT_SYSCALL
  arm64: Fix barriers used for page table modifications
  arm64: Add support for 48-bit VA space with 64KB page configuration
  arm64: asm/pgtable.h pmd/pud definitions clean-up
  arm64: Determine the vmalloc/vmemmap space at build time based on VA_BITS
  arm64: Clean up the initial page table creation in head.S
  arm64: Remove asm/pgtable-*level-types.h files
  arm64: Remove asm/pgtable-*level-hwdef.h files
  arm64: Convert bool ARM64_x_LEVELS to int ARM64_PGTABLE_LEVELS
  arm64: mm: Implement 4 levels of translation tables
  ...
Diffstat (limited to 'arch/arm64/kernel/vdso.c')
-rw-r--r--	arch/arm64/kernel/vdso.c	94
1 files changed, 51 insertions, 43 deletions
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 50384fec56c4..24f2e8c62479 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -88,22 +88,29 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = AARCH32_VECTORS_BASE;
-	int ret;
+	static struct vm_special_mapping spec = {
+		.name	= "[vectors]",
+		.pages	= vectors_page,
+
+	};
+	void *ret;
 
 	down_write(&mm->mmap_sem);
 	current->mm->context.vdso = (void *)addr;
 
 	/* Map vectors page at the high address. */
-	ret = install_special_mapping(mm, addr, PAGE_SIZE,
-				      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
-				      vectors_page);
+	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
+				       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+				       &spec);
 
 	up_write(&mm->mmap_sem);
 
-	return ret;
+	return PTR_ERR_OR_ZERO(ret);
 }
 #endif /* CONFIG_COMPAT */
 
+static struct vm_special_mapping vdso_spec[2];
+
 static int __init vdso_init(void)
 {
 	int i;
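
The switch above from install_special_mapping() to _install_special_mapping() also changes the error handling: the old helper returned an int, while the new one returns the freshly created VMA or an ERR_PTR()-encoded error, hence ret becoming a void * and the PTR_ERR_OR_ZERO() conversion on return. For reference, the interface being adopted looks roughly like this in the core mm headers of this era (a sketch, not part of this patch):

struct vm_special_mapping {
	const char *name;	/* name reported in /proc/<pid>/maps */
	struct page **pages;	/* pages backing the mapping, in order */
};

struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				unsigned long addr, unsigned long len,
				unsigned long vm_flags,
				const struct vm_special_mapping *spec);
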
@@ -114,8 +121,8 @@ static int __init vdso_init(void)
 	}
 
 	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
-		vdso_pages + 1, vdso_pages, 1L, &vdso_start);
+	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+		vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
 
 	/* Allocate the vDSO pagelist, plus a page for the data. */
 	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -123,12 +130,23 @@ static int __init vdso_init(void)
 	if (vdso_pagelist == NULL)
 		return -ENOMEM;
 
+	/* Grab the vDSO data page. */
+	vdso_pagelist[0] = virt_to_page(vdso_data);
+
 	/* Grab the vDSO code pages. */
 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
 
-	/* Grab the vDSO data page. */
-	vdso_pagelist[i] = virt_to_page(vdso_data);
+	/* Populate the special mapping structures */
+	vdso_spec[0] = (struct vm_special_mapping) {
+		.name	= "[vvar]",
+		.pages	= vdso_pagelist,
+	};
+
+	vdso_spec[1] = (struct vm_special_mapping) {
+		.name	= "[vdso]",
+		.pages	= &vdso_pagelist[1],
+	};
 
 	return 0;
 }
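
After this hunk the single page array is shared by both mappings: slot 0 holds the data page and slots 1..vdso_pages hold the vDSO text, so vdso_spec[0] ("[vvar]") points at the start of the array while vdso_spec[1] ("[vdso]") starts one entry in. A sketch of the resulting layout, using the names from the patch (illustrative comment only):

/*
 * vdso_pagelist after vdso_init():
 *
 *   [0]                 : vdso_data page   -> backs "[vvar]"
 *   [1] .. [vdso_pages] : vDSO text pages  -> back  "[vdso]"
 *
 * vdso_spec[0].pages == &vdso_pagelist[0]
 * vdso_spec[1].pages == &vdso_pagelist[1]
 */
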
@@ -138,52 +156,42 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 				int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long vdso_base, vdso_mapping_len;
-	int ret;
+	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+	void *ret;
 
+	vdso_text_len = vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
-	vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
 	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
-		ret = vdso_base;
+		ret = ERR_PTR(vdso_base);
 		goto up_fail;
 	}
-	mm->context.vdso = (void *)vdso_base;
-
-	ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
-				      VM_READ|VM_EXEC|
-				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				      vdso_pagelist);
-	if (ret) {
-		mm->context.vdso = NULL;
+	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD,
+				       &vdso_spec[0]);
+	if (IS_ERR(ret))
 		goto up_fail;
-	}
 
-up_fail:
-	up_write(&mm->mmap_sem);
+	vdso_base += PAGE_SIZE;
+	mm->context.vdso = (void *)vdso_base;
+	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &vdso_spec[1]);
+	if (IS_ERR(ret))
+		goto up_fail;
 
-	return ret;
-}
 
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	/*
-	 * We can re-use the vdso pointer in mm_context_t for identifying
-	 * the vectors page for compat applications. The vDSO will always
-	 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
-	 * it conflicting with the vectors base.
-	 */
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
-#ifdef CONFIG_COMPAT
-		if (vma->vm_start == AARCH32_VECTORS_BASE)
-			return "[vectors]";
-#endif
-		return "[vdso]";
-	}
+	up_write(&mm->mmap_sem);
+	return 0;
 
-	return NULL;
+up_fail:
+	mm->context.vdso = NULL;
+	up_write(&mm->mmap_sem);
+	return PTR_ERR(ret);
 }
 
 /*
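
The userspace-visible result of the rework is two adjacent special mappings instead of one: a read-only "[vvar]" data page immediately followed by the executable "[vdso]" text, with mm->context.vdso (and therefore the AT_SYSINFO_EHDR auxv entry) now pointing at the text, one page above the start of the reservation. The arch_vma_name() hook is gone because the names now come from the vm_special_mapping structures. A minimal userspace check of where the vDSO ends up, assuming a glibc with getauxval() (illustrative, not part of the patch):

#include <stdio.h>
#include <unistd.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/*
	 * AT_SYSINFO_EHDR carries mm->context.vdso, i.e. the base of the
	 * "[vdso]" text; with this layout the "[vvar]" page sits one page
	 * below it.
	 */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
	long page = sysconf(_SC_PAGESIZE);

	printf("[vdso] at %#lx, [vvar] expected at %#lx\n",
	       vdso, vdso - (unsigned long)page);
	return 0;
}
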