about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/x86_64/kernel/head.S1
-rw-r--r--arch/x86_64/kernel/setup.c9
-rw-r--r--arch/x86_64/kernel/trampoline.S168
3 files changed, 156 insertions, 22 deletions
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 2b2e2c51e532..562d62fbd69f 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -101,6 +101,7 @@ startup_32:
101 .org 0x100 101 .org 0x100
102 .globl startup_64 102 .globl startup_64
103startup_64: 103startup_64:
104ENTRY(secondary_startup_64)
104 /* We come here either from startup_32 105 /* We come here either from startup_32
105 * or directly from a 64bit bootloader. 106 * or directly from a 64bit bootloader.
106 * Since we may have come directly from a bootloader we 107 * Since we may have come directly from a bootloader we
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 3d98b696881d..4b114ee31ebc 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -329,15 +329,8 @@ void __init setup_arch(char **cmdline_p)
329#endif 329#endif
330 330
331#ifdef CONFIG_SMP 331#ifdef CONFIG_SMP
332 /*
333 * But first pinch a few for the stack/trampoline stuff
334 * FIXME: Don't need the extra page at 4K, but need to fix
335 * trampoline before removing it. (see the GDT stuff)
336 */
337 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
338
339 /* Reserve SMP trampoline */ 332 /* Reserve SMP trampoline */
340 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE); 333 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
341#endif 334#endif
342 335
343#ifdef CONFIG_ACPI_SLEEP 336#ifdef CONFIG_ACPI_SLEEP
diff --git a/arch/x86_64/kernel/trampoline.S b/arch/x86_64/kernel/trampoline.S
index c79b99a9e2f6..13eee63c7bb5 100644
--- a/arch/x86_64/kernel/trampoline.S
+++ b/arch/x86_64/kernel/trampoline.S
@@ -3,6 +3,7 @@
3 * Trampoline.S Derived from Setup.S by Linus Torvalds 3 * Trampoline.S Derived from Setup.S by Linus Torvalds
4 * 4 *
5 * 4 Jan 1997 Michael Chastain: changed to gnu as. 5 * 4 Jan 1997 Michael Chastain: changed to gnu as.
6 * 15 Sept 2005 Eric Biederman: 64bit PIC support
6 * 7 *
7 * Entry: CS:IP point to the start of our code, we are 8 * Entry: CS:IP point to the start of our code, we are
8 * in real mode with no stack, but the rest of the 9 * in real mode with no stack, but the rest of the
@@ -17,15 +18,20 @@
17 * and IP is zero. Thus, data addresses need to be absolute 18 * and IP is zero. Thus, data addresses need to be absolute
18 * (no relocation) and are taken with regard to r_base. 19 * (no relocation) and are taken with regard to r_base.
19 * 20 *
21 * With the addition of trampoline_level4_pgt this code can
22 * now enter a 64bit kernel that lives at arbitrary 64bit
23 * physical addresses.
24 *
20 * If you work on this file, check the object module with objdump 25 * If you work on this file, check the object module with objdump
21 * --full-contents --reloc to make sure there are no relocation 26 * --full-contents --reloc to make sure there are no relocation
22 * entries. For the GDT entry we do hand relocation in smpboot.c 27 * entries.
23 * because of 64bit linker limitations.
24 */ 28 */
25 29
26#include <linux/linkage.h> 30#include <linux/linkage.h>
27#include <asm/segment.h> 31#include <asm/pgtable.h>
28#include <asm/page.h> 32#include <asm/page.h>
33#include <asm/msr.h>
34#include <asm/segment.h>
29 35
30.data 36.data
31 37
@@ -33,15 +39,31 @@
33 39
34ENTRY(trampoline_data) 40ENTRY(trampoline_data)
35r_base = . 41r_base = .
42 cli # We should be safe anyway
36 wbinvd 43 wbinvd
37 mov %cs, %ax # Code and data in the same place 44 mov %cs, %ax # Code and data in the same place
38 mov %ax, %ds 45 mov %ax, %ds
46 mov %ax, %es
47 mov %ax, %ss
39 48
40 cli # We should be safe anyway
41 49
42 movl $0xA5A5A5A5, trampoline_data - r_base 50 movl $0xA5A5A5A5, trampoline_data - r_base
43 # write marker for master knows we're running 51 # write marker for master knows we're running
44 52
53 # Setup stack
54 movw $(trampoline_stack_end - r_base), %sp
55
56 call verify_cpu # Verify the cpu supports long mode
57
58 mov %cs, %ax
59 movzx %ax, %esi # Find the 32bit trampoline location
60 shll $4, %esi
61
62 # Fixup the vectors
63 addl %esi, startup_32_vector - r_base
64 addl %esi, startup_64_vector - r_base
65 addl %esi, tgdt + 2 - r_base # Fixup the gdt pointer
66
45 /* 67 /*
46 * GDT tables in non default location kernel can be beyond 16MB and 68 * GDT tables in non default location kernel can be beyond 16MB and
47 * lgdt will not be able to load the address as in real mode default 69 * lgdt will not be able to load the address as in real mode default
@@ -49,23 +71,141 @@ r_base = .
49 * to 32 bit. 71 * to 32 bit.
50 */ 72 */
51 73
52 lidtl idt_48 - r_base # load idt with 0, 0 74 lidtl tidt - r_base # load idt with 0, 0
53 lgdtl gdt_48 - r_base # load gdt with whatever is appropriate 75 lgdtl tgdt - r_base # load gdt with whatever is appropriate
54 76
55 xor %ax, %ax 77 xor %ax, %ax
56 inc %ax # protected mode (PE) bit 78 inc %ax # protected mode (PE) bit
57 lmsw %ax # into protected mode 79 lmsw %ax # into protected mode
58 # flaush prefetch and jump to startup_32 in arch/x86_64/kernel/head.S 80
59 ljmpl $__KERNEL32_CS, $(startup_32-__START_KERNEL_map) 81 # flush prefetch and jump to startup_32
82 ljmpl *(startup_32_vector - r_base)
83
84 .code32
85 .balign 4
86startup_32:
87 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
88 movl %eax, %ds
89
90 xorl %eax, %eax
91 btsl $5, %eax # Enable PAE mode
92 movl %eax, %cr4
93
94 # Setup trampoline 4 level pagetables
95 leal (trampoline_level4_pgt - r_base)(%esi), %eax
96 movl %eax, %cr3
97
98 movl $MSR_EFER, %ecx
99 movl $(1 << _EFER_LME), %eax # Enable Long Mode
100 xorl %edx, %edx
101 wrmsr
102
103 xorl %eax, %eax
104 btsl $31, %eax # Enable paging and in turn activate Long Mode
105 btsl $0, %eax # Enable protected mode
106 movl %eax, %cr0
107
108 /*
109 * At this point we're in long mode but in 32bit compatibility mode
110 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
111 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
112 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
113 */
114 ljmp *(startup_64_vector - r_base)(%esi)
115
116 .code64
117 .balign 4
118startup_64:
119 # Now jump into the kernel using virtual addresses
120 movq $secondary_startup_64, %rax
121 jmp *%rax
122
123 .code16
124verify_cpu:
125 pushl $0 # Kill any dangerous flags
126 popfl
127
128 /* minimum CPUID flags for x86-64 */
129 /* see http://www.x86-64.org/lists/discuss/msg02971.html */
130#define REQUIRED_MASK1 ((1<<0)|(1<<3)|(1<<4)|(1<<5)|(1<<6)|(1<<8)|\
131 (1<<13)|(1<<15)|(1<<24)|(1<<25)|(1<<26))
132#define REQUIRED_MASK2 (1<<29)
133
134 pushfl # check for cpuid
135 popl %eax
136 movl %eax, %ebx
137 xorl $0x200000,%eax
138 pushl %eax
139 popfl
140 pushfl
141 popl %eax
142 pushl %ebx
143 popfl
144 cmpl %eax, %ebx
145 jz no_longmode
146
147 xorl %eax, %eax # See if cpuid 1 is implemented
148 cpuid
149 cmpl $0x1, %eax
150 jb no_longmode
151
152 movl $0x01, %eax # Does the cpu have what it takes?
153 cpuid
154 andl $REQUIRED_MASK1, %edx
155 xorl $REQUIRED_MASK1, %edx
156 jnz no_longmode
157
158 movl $0x80000000, %eax # See if extended cpuid is implemented
159 cpuid
160 cmpl $0x80000001, %eax
161 jb no_longmode
162
163 movl $0x80000001, %eax # Does the cpu have what it takes?
164 cpuid
165 andl $REQUIRED_MASK2, %edx
166 xorl $REQUIRED_MASK2, %edx
167 jnz no_longmode
168
169 ret # The cpu supports long mode
170
171no_longmode:
172 hlt
173 jmp no_longmode
174
60 175
61 # Careful these need to be in the same 64K segment as the above; 176 # Careful these need to be in the same 64K segment as the above;
62idt_48: 177tidt:
63 .word 0 # idt limit = 0 178 .word 0 # idt limit = 0
64 .word 0, 0 # idt base = 0L 179 .word 0, 0 # idt base = 0L
65 180
66gdt_48: 181 # Duplicate the global descriptor table
67 .short GDT_ENTRIES*8 - 1 # gdt limit 182 # so the kernel can live anywhere
68 .long cpu_gdt_table-__START_KERNEL_map 183 .balign 4
184tgdt:
185 .short tgdt_end - tgdt # gdt limit
186 .long tgdt - r_base
187 .short 0
188 .quad 0x00cf9b000000ffff # __KERNEL32_CS
189 .quad 0x00af9b000000ffff # __KERNEL_CS
190 .quad 0x00cf93000000ffff # __KERNEL_DS
191tgdt_end:
192
193 .balign 4
194startup_32_vector:
195 .long startup_32 - r_base
196 .word __KERNEL32_CS, 0
197
198 .balign 4
199startup_64_vector:
200 .long startup_64 - r_base
201 .word __KERNEL_CS, 0
202
203trampoline_stack:
204 .org 0x1000
205trampoline_stack_end:
206ENTRY(trampoline_level4_pgt)
207 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
208 .fill 510,8,0
209 .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
69 210
70.globl trampoline_end 211ENTRY(trampoline_end)
71trampoline_end: