about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author: Andy Lutomirski <luto@mit.edu> 2011-07-21 15:47:10 -0400
committer: H. Peter Anvin <hpa@zytor.com> 2011-07-21 16:41:53 -0400
commit: aafade242ff24fac3aabf61c7861dfa44a3c2445 (patch)
tree: e28352a5883b5684466ea44f3caebe99c088eaf7 /arch
parent: ae7bd11b471931752e5609094ca0a49386590524 (diff)
x86-64, vdso: Do not allocate memory for the vDSO
We can map the vDSO straight from kernel data, saving a few page allocations. As an added bonus, the deleted code contained a memory leak.

Signed-off-by: Andy Lutomirski <luto@mit.edu>
Link: http://lkml.kernel.org/r/2c4ed5c2c2e93603790229e0c3403ae506ccc0cb.1311277573.git.luto@mit.edu
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/vdso/vdso.S  15
-rw-r--r--  arch/x86/vdso/vma.c   25
2 files changed, 19 insertions(+), 21 deletions(-)
diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S
index 1d3aa6b87181..1b979c12ba85 100644
--- a/arch/x86/vdso/vdso.S
+++ b/arch/x86/vdso/vdso.S
@@ -1,10 +1,21 @@
1#include <asm/page_types.h>
2#include <linux/linkage.h>
1#include <linux/init.h> 3#include <linux/init.h>
2 4
3__INITDATA 5__PAGE_ALIGNED_DATA
4 6
5 .globl vdso_start, vdso_end 7 .globl vdso_start, vdso_end
8 .align PAGE_SIZE
6vdso_start: 9vdso_start:
7 .incbin "arch/x86/vdso/vdso.so" 10 .incbin "arch/x86/vdso/vdso.so"
8vdso_end: 11vdso_end:
9 12
10__FINIT 13.previous
14
15 .globl vdso_pages
16 .bss
17 .align 8
18 .type vdso_pages, @object
19vdso_pages:
20 .zero (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
21 .size vdso_pages, .-vdso_pages
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index c39938d1332f..316fbca3490e 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -14,13 +14,14 @@
14#include <asm/vgtod.h> 14#include <asm/vgtod.h>
15#include <asm/proto.h> 15#include <asm/proto.h>
16#include <asm/vdso.h> 16#include <asm/vdso.h>
17#include <asm/page.h>
17 18
18unsigned int __read_mostly vdso_enabled = 1; 19unsigned int __read_mostly vdso_enabled = 1;
19 20
20extern char vdso_start[], vdso_end[]; 21extern char vdso_start[], vdso_end[];
21extern unsigned short vdso_sync_cpuid; 22extern unsigned short vdso_sync_cpuid;
22 23
23static struct page **vdso_pages; 24extern struct page *vdso_pages[];
24static unsigned vdso_size; 25static unsigned vdso_size;
25 26
26static void __init patch_vdso(void *vdso, size_t len) 27static void __init patch_vdso(void *vdso, size_t len)
@@ -54,7 +55,7 @@ found:
54 apply_alternatives(alt_data, alt_data + alt_sec->sh_size); 55 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
55} 56}
56 57
57static int __init init_vdso_vars(void) 58static int __init init_vdso(void)
58{ 59{
59 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; 60 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
60 int i; 61 int i;
@@ -62,26 +63,12 @@ static int __init init_vdso_vars(void)
62 patch_vdso(vdso_start, vdso_end - vdso_start); 63 patch_vdso(vdso_start, vdso_end - vdso_start);
63 64
64 vdso_size = npages << PAGE_SHIFT; 65 vdso_size = npages << PAGE_SHIFT;
65 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL); 66 for (i = 0; i < npages; i++)
66 if (!vdso_pages) 67 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
67 goto oom;
68 for (i = 0; i < npages; i++) {
69 struct page *p;
70 p = alloc_page(GFP_KERNEL);
71 if (!p)
72 goto oom;
73 vdso_pages[i] = p;
74 copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
75 }
76 68
77 return 0; 69 return 0;
78
79 oom:
80 printk("Cannot allocate vdso\n");
81 vdso_enabled = 0;
82 return -ENOMEM;
83} 70}
84subsys_initcall(init_vdso_vars); 71subsys_initcall(init_vdso);
85 72
86struct linux_binprm; 73struct linux_binprm;
87 74