author		Anders Kaseorg <andersk@ksplice.com>	2009-09-16 16:44:26 -0400
committer	H. Peter Anvin <hpa@zytor.com>		2009-09-18 13:21:45 -0400
commit		d223246ef7e6d73c8e3d9b58f27f2eb3fe95e25d (patch)
tree		7d2d5d3ed0825fe2a832369af55ca76b71afee97 /arch/x86/kernel/vmlinux.lds.S
parent		daf7b9c9216e2b82e4c14b7248a85286dab021c3 (diff)
x86: fix fragile computation of vsyscall address
Previously, the address of the vsyscall page (VSYSCALL_PHYS_ADDR,
VSYSCALL_VIRT_ADDR) was computed by arithmetic on the address of the last
section.  This leads to bugs when new sections are inserted, such as the
one fixed by commit d312ceda567ab91acd756cde95ac5fbc6b40ed40.

Let's compute it from the current address instead.

Signed-off-by: Anders Kaseorg <andersk@ksplice.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
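For readers checking the new macros (an illustrative sketch, not part of the patch): once __vsyscall_0 records the current location counter in the normal kernel mapping, the new offset definitions resolve to the same load and virtual addresses that the old VSYSCALL_PHYS_ADDR/VSYSCALL_VIRT_ADDR pair was meant to produce. In this script LOAD_OFFSET is __START_KERNEL_map on x86-64, so subtracting it converts a kernel virtual address into a load address:

    /* New definitions from the patch */
    #define VLOAD_OFFSET  (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
    #define VLOAD(x)      (ADDR(x) - VLOAD_OFFSET)
    #define VVIRT_OFFSET  (VSYSCALL_ADDR - __vsyscall_0)
    #define VVIRT(x)      (ADDR(x) - VVIRT_OFFSET)

    /*
     * For the first vsyscall section, ADDR(.vsyscall_0) == VSYSCALL_ADDR, so
     *   VLOAD(.vsyscall_0) == __vsyscall_0 - LOAD_OFFSET   (its load address)
     *   VVIRT(.vsyscall_0) == __vsyscall_0                 (its address in the
     *                                                       normal kernel mapping)
     * i.e. the page lands in the image exactly where the location counter
     * stood, independent of which section happens to be emitted before it.
     */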
Diffstat (limited to 'arch/x86/kernel/vmlinux.lds.S')
-rw-r--r--	arch/x86/kernel/vmlinux.lds.S	19
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0ccb57d5ee35..b96ca472fa26 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -135,24 +135,21 @@ SECTIONS
 #ifdef CONFIG_X86_64
 
 #define VSYSCALL_ADDR (-10*1024*1024)
-#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
-                        PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
-#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
-                        PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
 
-#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
+#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
 #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
 
-#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
+#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
 #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
 
+        . = ALIGN(4096);
+        __vsyscall_0 = .;
+
         . = VSYSCALL_ADDR;
-        .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
+        .vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
                 *(.vsyscall_0)
         } :user
 
-        __vsyscall_0 = VSYSCALL_VIRT_ADDR;
-
         . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
         .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
                 *(.vsyscall_fn)
@@ -192,11 +189,9 @@ SECTIONS
                 *(.vsyscall_3)
         }
 
-        . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
+        . = __vsyscall_0 + PAGE_SIZE;
 
 #undef VSYSCALL_ADDR
-#undef VSYSCALL_PHYS_ADDR
-#undef VSYSCALL_VIRT_ADDR
 #undef VLOAD_OFFSET
 #undef VLOAD
 #undef VVIRT_OFFSET
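A note on the constants (again an illustration, not part of the patch): VSYSCALL_ADDR is -10*1024*1024, which read as an unsigned 64-bit address is 0xffffffffff600000, the traditional fixed vsyscall page. The assignment in the second hunk then moves the location counter to one page past the spot recorded in __vsyscall_0, so the sections that follow stay contiguous in the normal kernel mapping:

    . = VSYSCALL_ADDR;              /* jump to 0xffffffffff600000 for the vsyscall sections */

    /* ... .vsyscall_0 through .vsyscall_3 are emitted here ... */

    . = __vsyscall_0 + PAGE_SIZE;   /* resume normal layout just after the reserved page */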