diff options
author | Eric W. Biederman <ebiederm@xmission.com> | 2005-06-25 17:57:48 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-25 19:24:47 -0400 |
commit | 5ded01e83ec3b60191b03b9f88f53acd4e6112f5 (patch) | |
tree | e53840eb0f96da69672dae00126602e8777203d7 | |
parent | ad0d75ebacdbf1004d004803df0ba371c6bdbe2a (diff) |
[PATCH] kexec: x86_64: vmlinux: fix physical addresses
The vmlinux on x86_64 does not report the correct physical address of
the kernel. Instead in the physical address field it currently
reports the virtual address of the kernel.
This patch is a bug fix that corrects vmlinux to report the
proper physical addresses.
This is potentially a help for crash dump analysis tools.
This definitely allows bootloaders to load vmlinux as a standard
ELF executable. Bootloaders directly loading vmlinux become of
practical importance when we consider the kexec on panic case.
Signed-off-by: Eric Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | arch/x86_64/Makefile | 2 | ||||
-rw-r--r-- | arch/x86_64/kernel/vmlinux.lds.S | 128 |
2 files changed, 87 insertions, 43 deletions
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile index 6f90c246c418..8a73794f9b90 100644 --- a/arch/x86_64/Makefile +++ b/arch/x86_64/Makefile | |||
@@ -35,7 +35,7 @@ export IA32_CC IA32_LD IA32_AS IA32_OBJCOPY IA32_CPP | |||
35 | 35 | ||
36 | LDFLAGS := -m elf_x86_64 | 36 | LDFLAGS := -m elf_x86_64 |
37 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S | 37 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S |
38 | LDFLAGS_vmlinux := -e stext | 38 | LDFLAGS_vmlinux := |
39 | 39 | ||
40 | CHECKFLAGS += -D__x86_64__ -m64 | 40 | CHECKFLAGS += -D__x86_64__ -m64 |
41 | 41 | ||
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S index 59ebd5beda87..73389f51c4e5 100644 --- a/arch/x86_64/kernel/vmlinux.lds.S +++ b/arch/x86_64/kernel/vmlinux.lds.S | |||
@@ -2,7 +2,10 @@ | |||
2 | * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>; | 2 | * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>; |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define LOAD_OFFSET __START_KERNEL_map | ||
6 | |||
5 | #include <asm-generic/vmlinux.lds.h> | 7 | #include <asm-generic/vmlinux.lds.h> |
8 | #include <asm/page.h> | ||
6 | #include <linux/config.h> | 9 | #include <linux/config.h> |
7 | 10 | ||
8 | OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64") | 11 | OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64") |
@@ -11,28 +14,30 @@ ENTRY(phys_startup_64) | |||
11 | jiffies_64 = jiffies; | 14 | jiffies_64 = jiffies; |
12 | SECTIONS | 15 | SECTIONS |
13 | { | 16 | { |
14 | . = 0xffffffff80100000; | 17 | . = __START_KERNEL; |
15 | phys_startup_64 = startup_64 - LOAD_OFFSET; | 18 | phys_startup_64 = startup_64 - LOAD_OFFSET; |
16 | _text = .; /* Text and read-only data */ | 19 | _text = .; /* Text and read-only data */ |
17 | .text : { | 20 | .text : AT(ADDR(.text) - LOAD_OFFSET) { |
18 | *(.text) | 21 | *(.text) |
19 | SCHED_TEXT | 22 | SCHED_TEXT |
20 | LOCK_TEXT | 23 | LOCK_TEXT |
21 | *(.fixup) | 24 | *(.fixup) |
22 | *(.gnu.warning) | 25 | *(.gnu.warning) |
23 | } = 0x9090 | 26 | } = 0x9090 |
24 | .text.lock : { *(.text.lock) } /* out-of-line lock text */ | 27 | /* out-of-line lock text */ |
28 | .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) } | ||
25 | 29 | ||
26 | _etext = .; /* End of text section */ | 30 | _etext = .; /* End of text section */ |
27 | 31 | ||
28 | . = ALIGN(16); /* Exception table */ | 32 | . = ALIGN(16); /* Exception table */ |
29 | __start___ex_table = .; | 33 | __start___ex_table = .; |
30 | __ex_table : { *(__ex_table) } | 34 | __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) } |
31 | __stop___ex_table = .; | 35 | __stop___ex_table = .; |
32 | 36 | ||
33 | RODATA | 37 | RODATA |
34 | 38 | ||
35 | .data : { /* Data */ | 39 | /* Data */ |
40 | .data : AT(ADDR(.data) - LOAD_OFFSET) { | ||
36 | *(.data) | 41 | *(.data) |
37 | CONSTRUCTORS | 42 | CONSTRUCTORS |
38 | } | 43 | } |
@@ -40,62 +45,95 @@ SECTIONS | |||
40 | _edata = .; /* End of data section */ | 45 | _edata = .; /* End of data section */ |
41 | 46 | ||
42 | __bss_start = .; /* BSS */ | 47 | __bss_start = .; /* BSS */ |
43 | .bss : { | 48 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { |
44 | *(.bss.page_aligned) | 49 | *(.bss.page_aligned) |
45 | *(.bss) | 50 | *(.bss) |
46 | } | 51 | } |
47 | __bss_end = .; | 52 | __bss_end = .; |
48 | 53 | ||
54 | . = ALIGN(PAGE_SIZE); | ||
49 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); | 55 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); |
50 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | 56 | .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { |
57 | *(.data.cacheline_aligned) | ||
58 | } | ||
59 | |||
60 | #define VSYSCALL_ADDR (-10*1024*1024) | ||
61 | #define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.cacheline_aligned) + SIZEOF(.data.cacheline_aligned) + 4095) & ~(4095)) | ||
62 | #define VSYSCALL_VIRT_ADDR ((ADDR(.data.cacheline_aligned) + SIZEOF(.data.cacheline_aligned) + 4095) & ~(4095)) | ||
63 | |||
64 | #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR) | ||
65 | #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) | ||
66 | |||
67 | #define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR) | ||
68 | #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) | ||
51 | 69 | ||
52 | #define AFTER(x) BINALIGN(LOADADDR(x) + SIZEOF(x), 16) | 70 | . = VSYSCALL_ADDR; |
53 | #define BINALIGN(x,y) (((x) + (y) - 1) & ~((y) - 1)) | 71 | .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } |
54 | #define CACHE_ALIGN(x) BINALIGN(x, CONFIG_X86_L1_CACHE_BYTES) | 72 | __vsyscall_0 = VSYSCALL_VIRT_ADDR; |
55 | 73 | ||
56 | .vsyscall_0 -10*1024*1024: AT ((LOADADDR(.data.cacheline_aligned) + SIZEOF(.data.cacheline_aligned) + 4095) & ~(4095)) { *(.vsyscall_0) } | ||
57 | __vsyscall_0 = LOADADDR(.vsyscall_0); | ||
58 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); | 74 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); |
59 | .xtime_lock : AT CACHE_ALIGN(AFTER(.vsyscall_0)) { *(.xtime_lock) } | 75 | .xtime_lock : AT(VLOAD(.xtime_lock)) { *(.xtime_lock) } |
60 | xtime_lock = LOADADDR(.xtime_lock); | 76 | xtime_lock = VVIRT(.xtime_lock); |
61 | .vxtime : AT AFTER(.xtime_lock) { *(.vxtime) } | 77 | |
62 | vxtime = LOADADDR(.vxtime); | 78 | .vxtime : AT(VLOAD(.vxtime)) { *(.vxtime) } |
63 | .wall_jiffies : AT AFTER(.vxtime) { *(.wall_jiffies) } | 79 | vxtime = VVIRT(.vxtime); |
64 | wall_jiffies = LOADADDR(.wall_jiffies); | 80 | |
65 | .sys_tz : AT AFTER(.wall_jiffies) { *(.sys_tz) } | 81 | .wall_jiffies : AT(VLOAD(.wall_jiffies)) { *(.wall_jiffies) } |
66 | sys_tz = LOADADDR(.sys_tz); | 82 | wall_jiffies = VVIRT(.wall_jiffies); |
67 | .sysctl_vsyscall : AT AFTER(.sys_tz) { *(.sysctl_vsyscall) } | 83 | |
68 | sysctl_vsyscall = LOADADDR(.sysctl_vsyscall); | 84 | .sys_tz : AT(VLOAD(.sys_tz)) { *(.sys_tz) } |
69 | .xtime : AT AFTER(.sysctl_vsyscall) { *(.xtime) } | 85 | sys_tz = VVIRT(.sys_tz); |
70 | xtime = LOADADDR(.xtime); | 86 | |
87 | .sysctl_vsyscall : AT(VLOAD(.sysctl_vsyscall)) { *(.sysctl_vsyscall) } | ||
88 | sysctl_vsyscall = VVIRT(.sysctl_vsyscall); | ||
89 | |||
90 | .xtime : AT(VLOAD(.xtime)) { *(.xtime) } | ||
91 | xtime = VVIRT(.xtime); | ||
92 | |||
71 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); | 93 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); |
72 | .jiffies : AT CACHE_ALIGN(AFTER(.xtime)) { *(.jiffies) } | 94 | .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) } |
73 | jiffies = LOADADDR(.jiffies); | 95 | jiffies = VVIRT(.jiffies); |
74 | .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT (LOADADDR(.vsyscall_0) + 1024) { *(.vsyscall_1) } | 96 | |
75 | . = LOADADDR(.vsyscall_0) + 4096; | 97 | .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) { *(.vsyscall_1) } |
98 | .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) { *(.vsyscall_2) } | ||
99 | .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { *(.vsyscall_3) } | ||
100 | |||
101 | . = VSYSCALL_VIRT_ADDR + 4096; | ||
102 | |||
103 | #undef VSYSCALL_ADDR | ||
104 | #undef VSYSCALL_PHYS_ADDR | ||
105 | #undef VSYSCALL_VIRT_ADDR | ||
106 | #undef VLOAD_OFFSET | ||
107 | #undef VLOAD | ||
108 | #undef VVIRT_OFFSET | ||
109 | #undef VVIRT | ||
76 | 110 | ||
77 | . = ALIGN(8192); /* init_task */ | 111 | . = ALIGN(8192); /* init_task */ |
78 | .data.init_task : { *(.data.init_task) } | 112 | .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { |
113 | *(.data.init_task) | ||
114 | } | ||
79 | 115 | ||
80 | . = ALIGN(4096); | 116 | . = ALIGN(4096); |
81 | .data.page_aligned : { *(.data.page_aligned) } | 117 | .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { |
118 | *(.data.page_aligned) | ||
119 | } | ||
82 | 120 | ||
83 | . = ALIGN(4096); /* Init code and data */ | 121 | . = ALIGN(4096); /* Init code and data */ |
84 | __init_begin = .; | 122 | __init_begin = .; |
85 | .init.text : { | 123 | .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { |
86 | _sinittext = .; | 124 | _sinittext = .; |
87 | *(.init.text) | 125 | *(.init.text) |
88 | _einittext = .; | 126 | _einittext = .; |
89 | } | 127 | } |
90 | __initdata_begin = .; | 128 | __initdata_begin = .; |
91 | .init.data : { *(.init.data) } | 129 | .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) } |
92 | __initdata_end = .; | 130 | __initdata_end = .; |
93 | . = ALIGN(16); | 131 | . = ALIGN(16); |
94 | __setup_start = .; | 132 | __setup_start = .; |
95 | .init.setup : { *(.init.setup) } | 133 | .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) } |
96 | __setup_end = .; | 134 | __setup_end = .; |
97 | __initcall_start = .; | 135 | __initcall_start = .; |
98 | .initcall.init : { | 136 | .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { |
99 | *(.initcall1.init) | 137 | *(.initcall1.init) |
100 | *(.initcall2.init) | 138 | *(.initcall2.init) |
101 | *(.initcall3.init) | 139 | *(.initcall3.init) |
@@ -106,32 +144,38 @@ SECTIONS | |||
106 | } | 144 | } |
107 | __initcall_end = .; | 145 | __initcall_end = .; |
108 | __con_initcall_start = .; | 146 | __con_initcall_start = .; |
109 | .con_initcall.init : { *(.con_initcall.init) } | 147 | .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { |
148 | *(.con_initcall.init) | ||
149 | } | ||
110 | __con_initcall_end = .; | 150 | __con_initcall_end = .; |
111 | SECURITY_INIT | 151 | SECURITY_INIT |
112 | . = ALIGN(8); | 152 | . = ALIGN(8); |
113 | __alt_instructions = .; | 153 | __alt_instructions = .; |
114 | .altinstructions : { *(.altinstructions) } | 154 | .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { |
155 | *(.altinstructions) | ||
156 | } | ||
115 | __alt_instructions_end = .; | 157 | __alt_instructions_end = .; |
116 | .altinstr_replacement : { *(.altinstr_replacement) } | 158 | .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { |
159 | *(.altinstr_replacement) | ||
160 | } | ||
117 | /* .exit.text is discard at runtime, not link time, to deal with references | 161 | /* .exit.text is discard at runtime, not link time, to deal with references |
118 | from .altinstructions and .eh_frame */ | 162 | from .altinstructions and .eh_frame */ |
119 | .exit.text : { *(.exit.text) } | 163 | .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) } |
120 | .exit.data : { *(.exit.data) } | 164 | .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) } |
121 | . = ALIGN(4096); | 165 | . = ALIGN(4096); |
122 | __initramfs_start = .; | 166 | __initramfs_start = .; |
123 | .init.ramfs : { *(.init.ramfs) } | 167 | .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) } |
124 | __initramfs_end = .; | 168 | __initramfs_end = .; |
125 | . = ALIGN(32); | 169 | . = ALIGN(32); |
126 | __per_cpu_start = .; | 170 | __per_cpu_start = .; |
127 | .data.percpu : { *(.data.percpu) } | 171 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) } |
128 | __per_cpu_end = .; | 172 | __per_cpu_end = .; |
129 | . = ALIGN(4096); | 173 | . = ALIGN(4096); |
130 | __init_end = .; | 174 | __init_end = .; |
131 | 175 | ||
132 | . = ALIGN(4096); | 176 | . = ALIGN(4096); |
133 | __nosave_begin = .; | 177 | __nosave_begin = .; |
134 | .data_nosave : { *(.data.nosave) } | 178 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) } |
135 | . = ALIGN(4096); | 179 | . = ALIGN(4096); |
136 | __nosave_end = .; | 180 | __nosave_end = .; |
137 | 181 | ||