Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/entry_32.S               |  10
-rw-r--r--  arch/x86/kernel/signal.c                 |   2
-rw-r--r--  arch/x86/vdso/Makefile                   |  24
-rw-r--r--  arch/x86/vdso/vclock_gettime.c           |   3
-rw-r--r--  arch/x86/vdso/vdso-fakesections.c        |  41
-rw-r--r--  arch/x86/vdso/vdso-layout.lds.S          |  64
-rw-r--r--  arch/x86/vdso/vdso.lds.S                 |   2
-rw-r--r--  arch/x86/vdso/vdso2c.c                   |  73
-rw-r--r--  arch/x86/vdso/vdso2c.h                   | 199
-rw-r--r--  arch/x86/vdso/vdso32/vdso-fakesections.c |   1
-rw-r--r--  arch/x86/vdso/vdsox32.lds.S              |   2
11 files changed, 297 insertions(+), 124 deletions(-)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f0da82b8e634..dbaa23e78b36 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -423,9 +423,10 @@ sysenter_past_esp:
 	jnz sysenter_audit
 sysenter_do_call:
 	cmpl $(NR_syscalls), %eax
-	jae syscall_badsys
+	jae sysenter_badsys
 	call *sys_call_table(,%eax,4)
 	movl %eax,PT_EAX(%esp)
+sysenter_after_call:
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
@@ -675,7 +676,12 @@ END(syscall_fault)
 
 syscall_badsys:
 	movl $-ENOSYS,PT_EAX(%esp)
-	jmp resume_userspace
+	jmp syscall_exit
+END(syscall_badsys)
+
+sysenter_badsys:
+	movl $-ENOSYS,PT_EAX(%esp)
+	jmp sysenter_after_call
 END(syscall_badsys)
 	CFI_ENDPROC
 
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index a0da58db43a8..2851d63c1202 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -363,7 +363,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
 	/* Set up to return from userspace. */
 	restorer = current->mm->context.vdso +
-		selected_vdso32->sym___kernel_sigreturn;
+		selected_vdso32->sym___kernel_rt_sigreturn;
 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
 		restorer = ksig->ka.sa.sa_restorer;
 	put_user_ex(restorer, &frame->pretcode);
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 3c0809a0631f..61b04fe36e66 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -11,7 +11,6 @@ VDSO32-$(CONFIG_COMPAT) := y
 
 # files to link into the vdso
 vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o
-vobjs-nox32 := vdso-fakesections.o
 
 # files to link into kernel
 obj-y += vma.o
@@ -67,7 +66,8 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso2c FORCE
 #
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-       -fno-omit-frame-pointer -foptimize-sibling-calls
+       -fno-omit-frame-pointer -foptimize-sibling-calls \
+       -DDISABLE_BRANCH_PROFILING
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
@@ -134,7 +134,7 @@ override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
 
 targets += vdso32/vdso32.lds
 targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
-targets += vdso32/vclock_gettime.o
+targets += vdso32/vclock_gettime.o vdso32/vdso-fakesections.o
 
 $(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
 
@@ -150,11 +150,13 @@ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
 KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
 $(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
 				 $(obj)/vdso32/vdso32.lds \
 				 $(obj)/vdso32/vclock_gettime.o \
+				 $(obj)/vdso32/vdso-fakesections.o \
 				 $(obj)/vdso32/note.o \
 				 $(obj)/vdso32/%.o
 	$(call if_changed,vdso)
@@ -169,14 +171,24 @@ quiet_cmd_vdso = VDSO $@
 	sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-	-Wl,-Bsymbolic $(LTO_CFLAGS)
+	$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
 GCOV_PROFILE := n
 
 #
-# Install the unstripped copies of vdso*.so.
+# Install the unstripped copies of vdso*.so.  If our toolchain supports
+# build-id, install .build-id links as well.
 #
 quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
-      cmd_vdso_install = cp $< $(MODLIB)/vdso/$(@:install_%=%)
+define cmd_vdso_install
+	cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
+	if readelf -n $< |grep -q 'Build ID'; then \
+	  buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+	  first=`echo $$buildid | cut -b-2`; \
+	  last=`echo $$buildid | cut -b3-`; \
+	  mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+	  ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+	fi
+endef
 
 vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
 
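For reference, the install rule above derives the .build-id link path by splitting the hex build-id after its first two characters (the `cut -b-2` / `cut -b3-` pipeline). A standalone C sketch of the same path construction follows; the build-id value and the output name in it are made up for illustration:

#include <stdio.h>
#include <string.h>

/*
 * Sketch only: build the .build-id link path the way cmd_vdso_install's
 * `cut -b-2` / `cut -b3-` pipeline does.  The build-id value is made up.
 */
int main(void)
{
	const char *buildid = "8f3a6b2c90d1e4f5a6b7c8d9e0f1a2b3c4d5e6f7";
	char first[3], path[256];

	memcpy(first, buildid, 2);	/* first two hex digits -> directory */
	first[2] = '\0';
	snprintf(path, sizeof(path), ".build-id/%s/%s.debug",
		 first, buildid + 2);	/* remainder -> symlink name */
	printf("%s\n", path);		/* .build-id/8f/3a6b...e6f7.debug */
	return 0;
}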
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index b2e4f493e5b0..9793322751e0 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -11,9 +11,6 @@
  * Check with readelf after changing.
  */
 
-/* Disable profiling for userspace code: */
-#define DISABLE_BRANCH_PROFILING
-
 #include <uapi/linux/time.h>
 #include <asm/vgtod.h>
 #include <asm/hpet.h>
diff --git a/arch/x86/vdso/vdso-fakesections.c b/arch/x86/vdso/vdso-fakesections.c
index cb8a8d72c24b..aa5fbfab20a5 100644
--- a/arch/x86/vdso/vdso-fakesections.c
+++ b/arch/x86/vdso/vdso-fakesections.c
@@ -2,31 +2,20 @@
  * Copyright 2014 Andy Lutomirski
  * Subject to the GNU Public License, v.2
  *
- * Hack to keep broken Go programs working.
- *
- * The Go runtime had a couple of bugs: it would read the section table to try
- * to figure out how many dynamic symbols there were (it shouldn't have looked
- * at the section table at all) and, if there were no SHT_SYNDYM section table
- * entry, it would use an uninitialized value for the number of symbols.  As a
- * workaround, we supply a minimal section table.  vdso2c will adjust the
- * in-memory image so that "vdso_fake_sections" becomes the section table.
- *
- * The bug was introduced by:
- * https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31)
- * and is being addressed in the Go runtime in this issue:
- * https://code.google.com/p/go/issues/detail?id=8197
+ * String table for loadable section headers.  See vdso2c.h for why
+ * this exists.
  */
 
-#ifndef __x86_64__
-#error This hack is specific to the 64-bit vDSO
-#endif
-
-#include <linux/elf.h>
-
-extern const __visible struct elf64_shdr vdso_fake_sections[];
-const __visible struct elf64_shdr vdso_fake_sections[] = {
-	{
-		.sh_type = SHT_DYNSYM,
-		.sh_entsize = sizeof(Elf64_Sym),
-	}
-};
+const char fake_shstrtab[] __attribute__((section(".fake_shstrtab"))) =
+	".hash\0"
+	".dynsym\0"
+	".dynstr\0"
+	".gnu.version\0"
+	".gnu.version_d\0"
+	".dynamic\0"
+	".rodata\0"
+	".fake_shstrtab\0"  /* Yay, self-referential code. */
+	".note\0"
+	".eh_frame_hdr\0"
+	".eh_frame\0"
+	".text";
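fake_shstrtab above is an ordinary ELF-style section-header string table: the names sit back to back, each NUL-terminated, and a lookup returns the byte offset of the match. The find_shname() helper added to vdso2c.h below walks it exactly this way; here is a rough standalone sketch of the idea (helper name and return convention are illustrative, not the kernel's):

#include <stddef.h>
#include <string.h>

/*
 * Sketch of a .shstrtab-style lookup: `tab` holds NUL-terminated names
 * packed back to back; return the byte offset of `name`, or (size_t)-1
 * if it is not present.
 */
static size_t shstrtab_find(const char *tab, size_t len, const char *name)
{
	size_t off = 0;

	while (off < len) {
		if (!strcmp(tab + off, name))
			return off;
		off += strlen(tab + off) + 1;	/* skip to the next name */
	}
	return (size_t)-1;
}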
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
index 2ec72f651ebf..9197544eea9a 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/vdso/vdso-layout.lds.S
@@ -6,6 +6,16 @@
  * This script controls its layout.
  */
 
+#if defined(BUILD_VDSO64)
+# define SHDR_SIZE 64
+#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
+# define SHDR_SIZE 40
+#else
+# error unknown VDSO target
+#endif
+
+#define NUM_FAKE_SHDRS 13
+
 SECTIONS
 {
 	. = SIZEOF_HEADERS;
@@ -18,36 +28,53 @@ SECTIONS
 	.gnu.version_d	: { *(.gnu.version_d) }
 	.gnu.version_r	: { *(.gnu.version_r) }
 
+	.dynamic	: { *(.dynamic) }		:text	:dynamic
+
+	.rodata		: {
+		*(.rodata*)
+		*(.data*)
+		*(.sdata*)
+		*(.got.plt) *(.got)
+		*(.gnu.linkonce.d.*)
+		*(.bss*)
+		*(.dynbss*)
+		*(.gnu.linkonce.b.*)
+
+		/*
+		 * Ideally this would live in a C file, but that won't
+		 * work cleanly for x32 until we start building the x32
+		 * C code using an x32 toolchain.
+		 */
+		VDSO_FAKE_SECTION_TABLE_START = .;
+		. = . + NUM_FAKE_SHDRS * SHDR_SIZE;
+		VDSO_FAKE_SECTION_TABLE_END = .;
+	} :text
+
+	.fake_shstrtab	: { *(.fake_shstrtab) }	:text
+
+
 	.note		: { *(.note.*) }		:text	:note
 
 	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
 	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
 
-	.dynamic	: { *(.dynamic) }		:text	:dynamic
-
-	.rodata		: { *(.rodata*) }		:text
-	.data		: {
-	      *(.data*)
-	      *(.sdata*)
-	      *(.got.plt) *(.got)
-	      *(.gnu.linkonce.d.*)
-	      *(.bss*)
-	      *(.dynbss*)
-	      *(.gnu.linkonce.b.*)
-	}
-
-	.altinstructions	: { *(.altinstructions) }
-	.altinstr_replacement	: { *(.altinstr_replacement) }
 
 	/*
-	 * Align the actual code well away from the non-instruction data.
-	 * This is the best thing for the I-cache.
+	 * Text is well-separated from actual data: there's plenty of
+	 * stuff that isn't used at runtime in between.
 	 */
-	. = ALIGN(0x100);
 
 	.text		: { *(.text*) }			:text	=0x90909090,
 
 	/*
+	 * At the end so that eu-elflint stays happy when vdso2c strips
+	 * these.  A better implementation would avoid allocating space
+	 * for these.
+	 */
+	.altinstructions	: { *(.altinstructions) }	:text
+	.altinstr_replacement	: { *(.altinstr_replacement) }	:text
+
+	/*
 	 * The remainder of the vDSO consists of special pages that are
 	 * shared between the kernel and userspace.  It needs to be at the
 	 * end so that it doesn't overlap the mapping of the actual
@@ -75,6 +102,7 @@ SECTIONS
 	/DISCARD/ : {
 		*(.discard)
 		*(.discard.*)
+		*(__bug_table)
 	}
 }
 
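The SHDR_SIZE values hard-coded at the top of this script exist because a linker script cannot evaluate sizeof(); 64 and 40 are the sizes of Elf64_Shdr and Elf32_Shdr respectively, so the reserved gap holds exactly NUM_FAKE_SHDRS headers. A compile-time cross-check of those constants (not part of the patch) could look like:

#include <elf.h>

/* Not part of the patch: verify the linker script's hard-coded sizes. */
_Static_assert(sizeof(Elf64_Shdr) == 64, "BUILD_VDSO64 reserves 64 bytes per shdr");
_Static_assert(sizeof(Elf32_Shdr) == 40, "BUILD_VDSO32/X32 reserves 40 bytes per shdr");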
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
index 75e3404c83b1..6807932643c2 100644
--- a/arch/x86/vdso/vdso.lds.S
+++ b/arch/x86/vdso/vdso.lds.S
@@ -6,6 +6,8 @@
  * the DSO.
  */
 
+#define BUILD_VDSO64
+
 #include "vdso-layout.lds.S"
 
 /*
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
index 7a6bf50f9165..238dbe82776e 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/vdso/vdso2c.c
@@ -23,6 +23,8 @@ enum {
 	sym_vvar_page,
 	sym_hpet_page,
 	sym_end_mapping,
+	sym_VDSO_FAKE_SECTION_TABLE_START,
+	sym_VDSO_FAKE_SECTION_TABLE_END,
 };
 
 const int special_pages[] = {
@@ -30,15 +32,26 @@ const int special_pages[] = {
 	sym_hpet_page,
 };
 
-char const * const required_syms[] = {
-	[sym_vvar_page] = "vvar_page",
-	[sym_hpet_page] = "hpet_page",
-	[sym_end_mapping] = "end_mapping",
-	"VDSO32_NOTE_MASK",
-	"VDSO32_SYSENTER_RETURN",
-	"__kernel_vsyscall",
-	"__kernel_sigreturn",
-	"__kernel_rt_sigreturn",
+struct vdso_sym {
+	const char *name;
+	bool export;
+};
+
+struct vdso_sym required_syms[] = {
+	[sym_vvar_page] = {"vvar_page", true},
+	[sym_hpet_page] = {"hpet_page", true},
+	[sym_end_mapping] = {"end_mapping", true},
+	[sym_VDSO_FAKE_SECTION_TABLE_START] = {
+		"VDSO_FAKE_SECTION_TABLE_START", false
+	},
+	[sym_VDSO_FAKE_SECTION_TABLE_END] = {
+		"VDSO_FAKE_SECTION_TABLE_END", false
+	},
+	{"VDSO32_NOTE_MASK", true},
+	{"VDSO32_SYSENTER_RETURN", true},
+	{"__kernel_vsyscall", true},
+	{"__kernel_sigreturn", true},
+	{"__kernel_rt_sigreturn", true},
 };
 
 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
@@ -83,37 +96,21 @@ extern void bad_put_le(void);
 
 #define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
 
-#define BITS 64
-#define GOFUNC go64
-#define Elf_Ehdr Elf64_Ehdr
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Phdr Elf64_Phdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Dyn Elf64_Dyn
+#define BITSFUNC3(name, bits) name##bits
+#define BITSFUNC2(name, bits) BITSFUNC3(name, bits)
+#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS)
+
+#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
+#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
+#define ELF_BITS 64
 #include "vdso2c.h"
-#undef BITS
-#undef GOFUNC
-#undef Elf_Ehdr
-#undef Elf_Shdr
-#undef Elf_Phdr
-#undef Elf_Sym
-#undef Elf_Dyn
-
-#define BITS 32
-#define GOFUNC go32
-#define Elf_Ehdr Elf32_Ehdr
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Phdr Elf32_Phdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Dyn Elf32_Dyn
+#undef ELF_BITS
+
+#define ELF_BITS 32
 #include "vdso2c.h"
-#undef BITS
-#undef GOFUNC
-#undef Elf_Ehdr
-#undef Elf_Shdr
-#undef Elf_Phdr
-#undef Elf_Sym
-#undef Elf_Dyn
+#undef ELF_BITS
 
 static void go(void *addr, size_t len, FILE *outfile, const char *name)
 {
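The BITSFUNC()/ELF() macros replace the long per-type #define/#undef lists: the extra BITSFUNC2/ELF_BITS_XFORM2 indirection makes the preprocessor expand ELF_BITS to 64 or 32 before the ## paste happens. A minimal standalone illustration of the same two-level paste (names shortened, not the kernel code):

#include <stdio.h>

/* PASTE2 expands BITS first; PASTE3 then glues the tokens together. */
#define PASTE3(name, bits)	name##bits
#define PASTE2(name, bits)	PASTE3(name, bits)
#define FUNC(name)		PASTE2(name, BITS)

#define BITS 64
static void FUNC(hello)(void) { puts("hello64"); }	/* defines hello64() */
#undef BITS

#define BITS 32
static void FUNC(hello)(void) { puts("hello32"); }	/* defines hello32() */
#undef BITS

int main(void)
{
	hello64();
	hello32();
	return 0;
}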
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index c6eefaf389b9..df95a2fdff73 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -4,23 +4,136 @@
  * are built for 32-bit userspace.
  */
 
-static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
+/*
+ * We're writing a section table for a few reasons:
+ *
+ * The Go runtime had a couple of bugs: it would read the section
+ * table to try to figure out how many dynamic symbols there were (it
+ * shouldn't have looked at the section table at all) and, if there
+ * were no SHT_SYNDYM section table entry, it would use an
+ * uninitialized value for the number of symbols.  An empty DYNSYM
+ * table would work, but I see no reason not to write a valid one (and
+ * keep full performance for old Go programs).  This hack is only
+ * needed on x86_64.
+ *
+ * The bug was introduced on 2012-08-31 by:
+ * https://code.google.com/p/go/source/detail?r=56ea40aac72b
+ * and was fixed on 2014-06-13 by:
+ * https://code.google.com/p/go/source/detail?r=fc1cd5e12595
+ *
+ * Binutils has issues debugging the vDSO: it reads the section table to
+ * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
+ * would break build-id if we removed the section table.  Binutils
+ * also requires that shstrndx != 0.  See:
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
+ *
+ * elfutils might not look for PT_NOTE if there is a section table at
+ * all.  I don't know whether this matters for any practical purpose.
+ *
+ * For simplicity, rather than hacking up a partial section table, we
+ * just write a mostly complete one.  We omit non-dynamic symbols,
+ * though, since they're rather large.
+ *
+ * Once binutils gets fixed, we might be able to drop this for all but
+ * the 64-bit vdso, since build-id only works in kernel RPMs, and
+ * systems that update to new enough kernel RPMs will likely update
+ * binutils in sync.  build-id has never worked for home-built kernel
+ * RPMs without manual symlinking, and I suspect that no one ever does
+ * that.
+ */
+struct BITSFUNC(fake_sections)
+{
+	ELF(Shdr) *table;
+	unsigned long table_offset;
+	int count, max_count;
+
+	int in_shstrndx;
+	unsigned long shstr_offset;
+	const char *shstrtab;
+	size_t shstrtab_len;
+
+	int out_shstrndx;
+};
+
+static unsigned int BITSFUNC(find_shname)(struct BITSFUNC(fake_sections) *out,
+					  const char *name)
+{
+	const char *outname = out->shstrtab;
+	while (outname - out->shstrtab < out->shstrtab_len) {
+		if (!strcmp(name, outname))
+			return (outname - out->shstrtab) + out->shstr_offset;
+		outname += strlen(outname) + 1;
+	}
+
+	if (*name)
+		printf("Warning: could not find output name \"%s\"\n", name);
+	return out->shstr_offset + out->shstrtab_len - 1;	/* Use a null. */
+}
+
+static void BITSFUNC(init_sections)(struct BITSFUNC(fake_sections) *out)
+{
+	if (!out->in_shstrndx)
+		fail("didn't find the fake shstrndx\n");
+
+	memset(out->table, 0, out->max_count * sizeof(ELF(Shdr)));
+
+	if (out->max_count < 1)
+		fail("we need at least two fake output sections\n");
+
+	PUT_LE(&out->table[0].sh_type, SHT_NULL);
+	PUT_LE(&out->table[0].sh_name, BITSFUNC(find_shname)(out, ""));
+
+	out->count = 1;
+}
+
+static void BITSFUNC(copy_section)(struct BITSFUNC(fake_sections) *out,
+				   int in_idx, const ELF(Shdr) *in,
+				   const char *name)
+{
+	uint64_t flags = GET_LE(&in->sh_flags);
+
+	bool copy = flags & SHF_ALLOC &&
+		strcmp(name, ".altinstructions") &&
+		strcmp(name, ".altinstr_replacement");
+
+	if (!copy)
+		return;
+
+	if (out->count >= out->max_count)
+		fail("too many copied sections (max = %d)\n", out->max_count);
+
+	if (in_idx == out->in_shstrndx)
+		out->out_shstrndx = out->count;
+
+	out->table[out->count] = *in;
+	PUT_LE(&out->table[out->count].sh_name,
+	       BITSFUNC(find_shname)(out, name));
+
+	/* elfutils requires that a strtab have the correct type. */
+	if (!strcmp(name, ".fake_shstrtab"))
+		PUT_LE(&out->table[out->count].sh_type, SHT_STRTAB);
+
+	out->count++;
+}
+
+static void BITSFUNC(go)(void *addr, size_t len,
+			 FILE *outfile, const char *name)
 {
 	int found_load = 0;
 	unsigned long load_size = -1;  /* Work around bogus warning */
 	unsigned long data_size;
-	Elf_Ehdr *hdr = (Elf_Ehdr *)addr;
+	ELF(Ehdr) *hdr = (ELF(Ehdr) *)addr;
 	int i;
 	unsigned long j;
-	Elf_Shdr *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
 		*alt_sec = NULL;
-	Elf_Dyn *dyn = 0, *dyn_end = 0;
+	ELF(Dyn) *dyn = 0, *dyn_end = 0;
 	const char *secstrings;
 	uint64_t syms[NSYMS] = {};
 
-	uint64_t fake_sections_value = 0, fake_sections_size = 0;
+	struct BITSFUNC(fake_sections) fake_sections = {};
 
-	Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff));
+	ELF(Phdr) *pt = (ELF(Phdr) *)(addr + GET_LE(&hdr->e_phoff));
 
 	/* Walk the segment table. */
 	for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
@@ -51,7 +164,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 	for (i = 0; dyn + i < dyn_end &&
 		     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
 		typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
-		if (tag == DT_REL || tag == DT_RELSZ ||
+		if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
 		    tag == DT_RELENT || tag == DT_TEXTREL)
 			fail("vdso image contains dynamic relocations\n");
 	}
@@ -61,7 +174,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 		GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
 	secstrings = addr + GET_LE(&secstrings_hdr->sh_offset);
 	for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
-		Elf_Shdr *sh = addr + GET_LE(&hdr->e_shoff) +
+		ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
 			GET_LE(&hdr->e_shentsize) * i;
 		if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
 			symtab_hdr = sh;
@@ -82,29 +195,63 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 	     i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
 	     i++) {
 		int k;
-		Elf_Sym *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
+		ELF(Sym) *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
 			GET_LE(&symtab_hdr->sh_entsize) * i;
 		const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
 			GET_LE(&sym->st_name);
 
 		for (k = 0; k < NSYMS; k++) {
-			if (!strcmp(name, required_syms[k])) {
+			if (!strcmp(name, required_syms[k].name)) {
 				if (syms[k]) {
 					fail("duplicate symbol %s\n",
-					     required_syms[k]);
+					     required_syms[k].name);
 				}
 				syms[k] = GET_LE(&sym->st_value);
 			}
 		}
 
-		if (!strcmp(name, "vdso_fake_sections")) {
-			if (fake_sections_value)
-				fail("duplicate vdso_fake_sections\n");
-			fake_sections_value = GET_LE(&sym->st_value);
-			fake_sections_size = GET_LE(&sym->st_size);
+		if (!strcmp(name, "fake_shstrtab")) {
+			ELF(Shdr) *sh;
+
+			fake_sections.in_shstrndx = GET_LE(&sym->st_shndx);
+			fake_sections.shstrtab = addr + GET_LE(&sym->st_value);
+			fake_sections.shstrtab_len = GET_LE(&sym->st_size);
+			sh = addr + GET_LE(&hdr->e_shoff) +
+				GET_LE(&hdr->e_shentsize) *
+				fake_sections.in_shstrndx;
+			fake_sections.shstr_offset = GET_LE(&sym->st_value) -
+				GET_LE(&sh->sh_addr);
 		}
 	}
 
+	/* Build the output section table. */
+	if (!syms[sym_VDSO_FAKE_SECTION_TABLE_START] ||
+	    !syms[sym_VDSO_FAKE_SECTION_TABLE_END])
+		fail("couldn't find fake section table\n");
+	if ((syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
+	     syms[sym_VDSO_FAKE_SECTION_TABLE_START]) % sizeof(ELF(Shdr)))
+		fail("fake section table size isn't a multiple of sizeof(Shdr)\n");
+	fake_sections.table = addr + syms[sym_VDSO_FAKE_SECTION_TABLE_START];
+	fake_sections.table_offset = syms[sym_VDSO_FAKE_SECTION_TABLE_START];
+	fake_sections.max_count = (syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
+				   syms[sym_VDSO_FAKE_SECTION_TABLE_START]) /
+				   sizeof(ELF(Shdr));
+
+	BITSFUNC(init_sections)(&fake_sections);
+	for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
+		ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
+			GET_LE(&hdr->e_shentsize) * i;
+		BITSFUNC(copy_section)(&fake_sections, i, sh,
+				       secstrings + GET_LE(&sh->sh_name));
+	}
+	if (!fake_sections.out_shstrndx)
+		fail("didn't generate shstrndx?!?\n");
+
+	PUT_LE(&hdr->e_shoff, fake_sections.table_offset);
+	PUT_LE(&hdr->e_shentsize, sizeof(ELF(Shdr)));
+	PUT_LE(&hdr->e_shnum, fake_sections.count);
+	PUT_LE(&hdr->e_shstrndx, fake_sections.out_shstrndx);
+
 	/* Validate mapping addresses. */
 	for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
 		if (!syms[i])
@@ -112,25 +259,17 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 
 		if (syms[i] % 4096)
 			fail("%s must be a multiple of 4096\n",
-			     required_syms[i]);
+			     required_syms[i].name);
 		if (syms[i] < data_size)
 			fail("%s must be after the text mapping\n",
-			     required_syms[i]);
+			     required_syms[i].name);
 		if (syms[sym_end_mapping] < syms[i] + 4096)
-			fail("%s overruns end_mapping\n", required_syms[i]);
+			fail("%s overruns end_mapping\n",
+			     required_syms[i].name);
 	}
 	if (syms[sym_end_mapping] % 4096)
 		fail("end_mapping must be a multiple of 4096\n");
 
-	/* Remove sections or use fakes */
-	if (fake_sections_size % sizeof(Elf_Shdr))
-		fail("vdso_fake_sections size is not a multiple of %ld\n",
-		     (long)sizeof(Elf_Shdr));
-	PUT_LE(&hdr->e_shoff, fake_sections_value);
-	PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
-	PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
-	PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);
-
 	if (!name) {
 		fwrite(addr, load_size, 1, outfile);
 		return;
@@ -168,9 +307,9 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 			(unsigned long)GET_LE(&alt_sec->sh_size));
 	}
 	for (i = 0; i < NSYMS; i++) {
-		if (syms[i])
+		if (required_syms[i].export && syms[i])
 			fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n",
-				required_syms[i], syms[i]);
+				required_syms[i].name, syms[i]);
 	}
 	fprintf(outfile, "};\n");
 }
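After copy_section() has filled the reserved in-image table, go() points e_shoff, e_shentsize, e_shnum and e_shstrndx at it, so ordinary ELF consumers (readelf, binutils, the Go runtime) walk the fake table like a real one. A minimal reader-side sketch of that walk, assuming a 64-bit, host-endian image and doing no validation:

#include <elf.h>
#include <stdio.h>

/* Sketch: list section names the way a consumer sees them once vdso2c
 * has patched the ELF header.  `image` points at the whole vDSO image. */
static void list_sections(const void *image)
{
	const Elf64_Ehdr *ehdr = image;
	const Elf64_Shdr *shdr = (const void *)((const char *)image +
						ehdr->e_shoff);
	const char *shstrtab = (const char *)image +
			       shdr[ehdr->e_shstrndx].sh_offset;
	int i;

	for (i = 0; i < ehdr->e_shnum; i++)
		printf("[%2d] %s\n", i, shstrtab + shdr[i].sh_name);
}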
diff --git a/arch/x86/vdso/vdso32/vdso-fakesections.c b/arch/x86/vdso/vdso32/vdso-fakesections.c
new file mode 100644
index 000000000000..541468e25265
--- /dev/null
+++ b/arch/x86/vdso/vdso32/vdso-fakesections.c
@@ -0,0 +1 @@
+#include "../vdso-fakesections.c"
diff --git a/arch/x86/vdso/vdsox32.lds.S b/arch/x86/vdso/vdsox32.lds.S
index 46b991b578a8..697c11ece90c 100644
--- a/arch/x86/vdso/vdsox32.lds.S
+++ b/arch/x86/vdso/vdsox32.lds.S
@@ -6,6 +6,8 @@
  * the DSO.
  */
 
+#define BUILD_VDSOX32
+
 #include "vdso-layout.lds.S"
 
 /*