diff options
author | Eric W. Biederman <ebiederm@xmission.com> | 2005-06-25 17:57:52 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-25 19:24:48 -0400 |
commit | d0537508a9921efced238b20967e50e519ac34af (patch) | |
tree | 80f2743e9299da07c07625af45807e16a7d7f85a /arch/x86_64/boot/compressed/head.S | |
parent | 8a9190853c34289d9181acd9c620c76143bf88ca (diff) |
[PATCH] kexec: x86_64: add CONFIG_PHYSICAL_START
For one kernel to report a crash another kernel has created we need
to have 2 kernels loaded simultaneously in memory. To accomplish this
the two kernels need to be built to run at different physical addresses.
This patch adds the CONFIG_PHYSICAL_START option to the x86_64 kernel
so we can do just that. You need to know what you are doing and
what the ramifications are before changing this value; most users
won't care, so I have made it depend on CONFIG_EMBEDDED
bzImage kernels will work and run at a different address when compiled
with this option, but they will still load at 1MB. If you need a kernel
loaded at a different address as well, you need to boot a vmlinux.
Signed-off-by: Eric Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/boot/compressed/head.S')
-rw-r--r-- | arch/x86_64/boot/compressed/head.S | 7 |
1 files changed, 4 insertions, 3 deletions
diff --git a/arch/x86_64/boot/compressed/head.S b/arch/x86_64/boot/compressed/head.S index 27264dbd575c..b0df2aeff5dc 100644 --- a/arch/x86_64/boot/compressed/head.S +++ b/arch/x86_64/boot/compressed/head.S | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | #include <linux/linkage.h> | 29 | #include <linux/linkage.h> |
30 | #include <asm/segment.h> | 30 | #include <asm/segment.h> |
31 | #include <asm/page.h> | ||
31 | 32 | ||
32 | .code32 | 33 | .code32 |
33 | .globl startup_32 | 34 | .globl startup_32 |
@@ -77,7 +78,7 @@ startup_32: | |||
77 | jnz 3f | 78 | jnz 3f |
78 | addl $8,%esp | 79 | addl $8,%esp |
79 | xorl %ebx,%ebx | 80 | xorl %ebx,%ebx |
80 | ljmp $(__KERNEL_CS), $0x100000 | 81 | ljmp $(__KERNEL_CS), $__PHYSICAL_START |
81 | 82 | ||
82 | /* | 83 | /* |
83 | * We come here, if we were loaded high. | 84 | * We come here, if we were loaded high. |
@@ -103,7 +104,7 @@ startup_32: | |||
103 | popl %ecx # lcount | 104 | popl %ecx # lcount |
104 | popl %edx # high_buffer_start | 105 | popl %edx # high_buffer_start |
105 | popl %eax # hcount | 106 | popl %eax # hcount |
106 | movl $0x100000,%edi | 107 | movl $__PHYSICAL_START,%edi |
107 | cli # make sure we don't get interrupted | 108 | cli # make sure we don't get interrupted |
108 | ljmp $(__KERNEL_CS), $0x1000 # and jump to the move routine | 109 | ljmp $(__KERNEL_CS), $0x1000 # and jump to the move routine |
109 | 110 | ||
@@ -128,7 +129,7 @@ move_routine_start: | |||
128 | movsl | 129 | movsl |
129 | movl %ebx,%esi # Restore setup pointer | 130 | movl %ebx,%esi # Restore setup pointer |
130 | xorl %ebx,%ebx | 131 | xorl %ebx,%ebx |
131 | ljmp $(__KERNEL_CS), $0x100000 | 132 | ljmp $(__KERNEL_CS), $__PHYSICAL_START |
132 | move_routine_end: | 133 | move_routine_end: |
133 | 134 | ||
134 | 135 | ||