commit    e10848a26a962e404ac00c897dfe54f14290806d (patch)
tree      3be0d9d69d2471749b22743f02d3dd98b59a9aed
parent    617b3c37da78cb89c63ed880b2405afc7490567b (diff)
author    Matt Fleming <matt.fleming@intel.com>  2014-03-13 10:58:42 -0400
committer Matt Fleming <matt.fleming@intel.com>  2014-03-17 17:54:17 -0400
x86/efi: Preserve segment registers in mixed mode
I was triggering a #GP(0) from userland when running with
CONFIG_EFI_MIXED and CONFIG_IA32_EMULATION, from what looked like
register corruption. Turns out that the mixed mode code was trashing the
contents of %ds, %es and %ss in __efi64_thunk().
Save and restore the contents of these segment registers across the call
to __efi64_thunk() so that we don't corrupt the CPU context.
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
 arch/x86/platform/efi/efi_stub_64.S | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 65b787a9fc4e..e0984ef0374b 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -176,6 +176,13 @@ ENDPROC(efi_call6)
  * This function must be invoked with a 1:1 mapped stack.
  */
 ENTRY(__efi64_thunk)
+	movl	%ds, %eax
+	push	%rax
+	movl	%es, %eax
+	push	%rax
+	movl	%ss, %eax
+	push	%rax
+
 	subq	$32, %rsp
 	movl	%esi, 0x0(%rsp)
 	movl	%edx, 0x4(%rsp)
@@ -191,7 +198,7 @@ ENTRY(__efi64_thunk)
 	movq	%rbx, func_rt_ptr(%rip)
 
 	/* Switch to gdt with 32-bit segments */
-	movl	40(%rsp), %eax
+	movl	64(%rsp), %eax
 	lgdt	(%rax)
 
 	leaq	efi_enter32(%rip), %rax
@@ -203,6 +210,13 @@ ENTRY(__efi64_thunk)
 
 	lgdt	save_gdt(%rip)
 
+	pop	%rbx
+	movl	%ebx, %ss
+	pop	%rbx
+	movl	%ebx, %es
+	pop	%rbx
+	movl	%ebx, %ds
+
 	/*
 	 * Convert 32-bit status code into 64-bit.
 	 */
@@ -218,11 +232,6 @@ ENDPROC(__efi64_thunk)
 
 ENTRY(efi_exit32)
-	xorq	%rax, %rax
-	movl	%eax, %ds
-	movl	%eax, %es
-	movl	%eax, %ss
-
 	movq	func_rt_ptr(%rip), %rax
 	push	%rax
 	mov	%rdi, %rax
@@ -267,7 +276,7 @@ ENTRY(efi_enter32)
 	 */
 	cli
 
-	movl	44(%esp), %eax
+	movl	68(%esp), %eax
 	movl	%eax, 2(%eax)
 	lgdtl	(%eax)
 
@@ -286,7 +295,7 @@ ENTRY(efi_enter32)
 	xorl	%eax, %eax
 	lldt	%ax
 
-	movl	48(%esp), %eax
+	movl	72(%esp), %eax
 	pushl	$__KERNEL_CS
 	pushl	%eax