diff options
author | Matt Fleming <matt.fleming@intel.com> | 2014-01-10 10:37:17 -0500 |
---|---|---|
committer | Matt Fleming <matt.fleming@intel.com> | 2014-03-04 16:25:04 -0500 |
commit | 0154416a71c2a84c3746c8dd8ed25287e36934d3 (patch) | |
tree | 80251a960a808ee33b4822b5613b55ceec28c714 /arch/x86/platform | |
parent | 54b52d87268034859191d671505bb1cfce6bd74d (diff) |
x86/efi: Add early thunk code to go from 64-bit to 32-bit
Implement the transition code to go from IA32e mode to protected mode in
the EFI boot stub. This is required to use 32-bit EFI services from a
64-bit kernel.
Since the EFI boot stub is executed in an identity-mapped region, there's
not much we need to do before invoking the 32-bit EFI boot services.
However, we do reload the firmware's global descriptor table
(efi32_boot_gdt) in case things like timer events are still running in
the firmware.
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Diffstat (limited to 'arch/x86/platform')
-rw-r--r-- | arch/x86/platform/efi/efi_stub_64.S | 150 |
1 file changed, 150 insertions, 0 deletions
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 88073b140298..a790d69cc85e 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S | |||
@@ -7,6 +7,10 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
10 | #include <asm/segment.h> | ||
11 | #include <asm/msr.h> | ||
12 | #include <asm/processor-flags.h> | ||
13 | #include <asm/page_types.h> | ||
10 | 14 | ||
11 | #define SAVE_XMM \ | 15 | #define SAVE_XMM \ |
12 | mov %rsp, %rax; \ | 16 | mov %rsp, %rax; \ |
@@ -164,6 +168,152 @@ ENTRY(efi_call6) | |||
164 | ret | 168 | ret |
165 | ENDPROC(efi_call6) | 169 | ENDPROC(efi_call6) |
166 | 170 | ||
171 | #ifdef CONFIG_EFI_MIXED | ||
172 | |||
173 | /* | ||
174 | * We run this function from the 1:1 mapping. | ||
175 | * | ||
176 | * This function must be invoked with a 1:1 mapped stack. | ||
177 | */ | ||
178 | ENTRY(__efi64_thunk) | ||
179 | subq $32, %rsp | ||
180 | movl %esi, 0x0(%rsp) | ||
181 | movl %edx, 0x4(%rsp) | ||
182 | movl %ecx, 0x8(%rsp) | ||
183 | movq %r8, %rsi | ||
184 | movl %esi, 0xc(%rsp) | ||
185 | movq %r9, %rsi | ||
186 | movl %esi, 0x10(%rsp) | ||
187 | |||
188 | sgdt save_gdt(%rip) | ||
189 | |||
190 | leaq 1f(%rip), %rbx | ||
191 | movq %rbx, func_rt_ptr(%rip) | ||
192 | |||
193 | /* Switch to gdt with 32-bit segments */ | ||
194 | movl 40(%rsp), %eax | ||
195 | lgdt (%rax) | ||
196 | |||
197 | leaq efi_enter32(%rip), %rax | ||
198 | pushq $__KERNEL_CS | ||
199 | pushq %rax | ||
200 | lretq | ||
201 | |||
202 | 1: addq $32, %rsp | ||
203 | |||
204 | lgdt save_gdt(%rip) | ||
205 | |||
206 | /* | ||
207 | * Convert 32-bit status code into 64-bit. | ||
208 | */ | ||
209 | test %rax, %rax | ||
210 | jz 1f | ||
211 | movl %eax, %ecx | ||
212 | andl $0x0fffffff, %ecx | ||
213 | andl $0xf0000000, %eax | ||
214 | shl $32, %rax | ||
215 | or %rcx, %rax | ||
216 | 1: | ||
217 | ret | ||
218 | ENDPROC(__efi64_thunk) | ||
219 | |||
220 | ENTRY(efi_exit32) | ||
221 | xorq %rax, %rax | ||
222 | movl %eax, %ds | ||
223 | movl %eax, %es | ||
224 | movl %eax, %ss | ||
225 | |||
226 | movq func_rt_ptr(%rip), %rax | ||
227 | push %rax | ||
228 | mov %rdi, %rax | ||
229 | ret | ||
230 | ENDPROC(efi_exit32) | ||
231 | |||
232 | .code32 | ||
233 | /* | ||
234 | * EFI service pointer must be in %edi. | ||
235 | * | ||
236 | * The stack should represent the 32-bit calling convention. | ||
237 | */ | ||
238 | ENTRY(efi_enter32) | ||
239 | movl $__KERNEL_DS, %eax | ||
240 | movl %eax, %ds | ||
241 | movl %eax, %es | ||
242 | movl %eax, %ss | ||
243 | |||
244 | /* Reload pgtables */ | ||
245 | movl %cr3, %eax | ||
246 | movl %eax, %cr3 | ||
247 | |||
248 | /* Disable paging */ | ||
249 | movl %cr0, %eax | ||
250 | btrl $X86_CR0_PG_BIT, %eax | ||
251 | movl %eax, %cr0 | ||
252 | |||
253 | /* Disable long mode via EFER */ | ||
254 | movl $MSR_EFER, %ecx | ||
255 | rdmsr | ||
256 | btrl $_EFER_LME, %eax | ||
257 | wrmsr | ||
258 | |||
259 | call *%edi | ||
260 | |||
261 | /* We must preserve return value */ | ||
262 | movl %eax, %edi | ||
263 | |||
264 | movl 44(%esp), %eax | ||
265 | movl %eax, 2(%eax) | ||
266 | lgdtl (%eax) | ||
267 | |||
268 | movl %cr4, %eax | ||
269 | btsl $(X86_CR4_PAE_BIT), %eax | ||
270 | movl %eax, %cr4 | ||
271 | |||
272 | movl %cr3, %eax | ||
273 | movl %eax, %cr3 | ||
274 | |||
275 | movl $MSR_EFER, %ecx | ||
276 | rdmsr | ||
277 | btsl $_EFER_LME, %eax | ||
278 | wrmsr | ||
279 | |||
280 | xorl %eax, %eax | ||
281 | lldt %ax | ||
282 | |||
283 | movl 48(%esp), %eax | ||
284 | pushl $__KERNEL_CS | ||
285 | pushl %eax | ||
286 | |||
287 | /* Enable paging */ | ||
288 | movl %cr0, %eax | ||
289 | btsl $X86_CR0_PG_BIT, %eax | ||
290 | movl %eax, %cr0 | ||
291 | lret | ||
292 | ENDPROC(efi_enter32) | ||
293 | |||
294 | .data | ||
295 | .balign 8 | ||
296 | .global efi32_boot_gdt | ||
297 | efi32_boot_gdt: .word 0 | ||
298 | .quad 0 | ||
299 | |||
300 | save_gdt: .word 0 | ||
301 | .quad 0 | ||
302 | func_rt_ptr: .quad 0 | ||
303 | |||
304 | .global efi_gdt64 | ||
305 | efi_gdt64: | ||
306 | .word efi_gdt64_end - efi_gdt64 | ||
307 | .long 0 /* Filled out by user */ | ||
308 | .word 0 | ||
309 | .quad 0x0000000000000000 /* NULL descriptor */ | ||
310 | .quad 0x00af9a000000ffff /* __KERNEL_CS */ | ||
311 | .quad 0x00cf92000000ffff /* __KERNEL_DS */ | ||
312 | .quad 0x0080890000000000 /* TS descriptor */ | ||
313 | .quad 0x0000000000000000 /* TS continued */ | ||
314 | efi_gdt64_end: | ||
315 | #endif /* CONFIG_EFI_MIXED */ | ||
316 | |||
167 | .data | 317 | .data |
168 | ENTRY(efi_scratch) | 318 | ENTRY(efi_scratch) |
169 | .fill 3,8,0 | 319 | .fill 3,8,0 |