diff options
-rw-r--r-- | arch/x86/Kconfig | 23 | ||||
-rw-r--r-- | arch/x86/kernel/entry_32.S | 12 | ||||
-rw-r--r-- | arch/x86/kernel/entry_64.S | 8 | ||||
-rw-r--r-- | arch/x86/kernel/ldt.c | 5 |
4 files changed, 43 insertions, 5 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9a952a572585..956c7702471e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -909,14 +909,27 @@ config VM86 | |||
909 | default y | 909 | default y |
910 | depends on X86_32 | 910 | depends on X86_32 |
911 | ---help--- | 911 | ---help--- |
912 | This option is required by programs like DOSEMU to run 16-bit legacy | 912 | This option is required by programs like DOSEMU to run |
913 | code on X86 processors. It also may be needed by software like | 913 | 16-bit real mode legacy code on x86 processors. It also may |
914 | XFree86 to initialize some video cards via BIOS. Disabling this | 914 | be needed by software like XFree86 to initialize some video |
915 | option saves about 6k. | 915 | cards via BIOS. Disabling this option saves about 6K. |
916 | |||
917 | config X86_16BIT | ||
918 | bool "Enable support for 16-bit segments" if EXPERT | ||
919 | default y | ||
920 | ---help--- | ||
921 | This option is required by programs like Wine to run 16-bit | ||
922 | protected mode legacy code on x86 processors. Disabling | ||
923 | this option saves about 300 bytes on i386, or around 6K text | ||
924 | plus 16K runtime memory on x86-64. | ||
925 | |||
926 | config X86_ESPFIX32 | ||
927 | def_bool y | ||
928 | depends on X86_16BIT && X86_32 | ||
916 | 929 | ||
917 | config X86_ESPFIX64 | 930 | config X86_ESPFIX64 |
918 | def_bool y | 931 | def_bool y |
919 | depends on X86_64 | 932 | depends on X86_16BIT && X86_64 |
920 | 933 | ||
921 | config TOSHIBA | 934 | config TOSHIBA |
922 | tristate "Toshiba Laptop support" | 935 | tristate "Toshiba Laptop support" |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 2780b8f3b96c..98313ffaae6a 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -527,6 +527,7 @@ syscall_exit: | |||
527 | restore_all: | 527 | restore_all: |
528 | TRACE_IRQS_IRET | 528 | TRACE_IRQS_IRET |
529 | restore_all_notrace: | 529 | restore_all_notrace: |
530 | #ifdef CONFIG_X86_ESPFIX32 | ||
530 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS | 531 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS |
531 | # Warning: PT_OLDSS(%esp) contains the wrong/random values if we | 532 | # Warning: PT_OLDSS(%esp) contains the wrong/random values if we |
532 | # are returning to the kernel. | 533 | # are returning to the kernel. |
@@ -537,6 +538,7 @@ restore_all_notrace: | |||
537 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax | 538 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax |
538 | CFI_REMEMBER_STATE | 539 | CFI_REMEMBER_STATE |
539 | je ldt_ss # returning to user-space with LDT SS | 540 | je ldt_ss # returning to user-space with LDT SS |
541 | #endif | ||
540 | restore_nocheck: | 542 | restore_nocheck: |
541 | RESTORE_REGS 4 # skip orig_eax/error_code | 543 | RESTORE_REGS 4 # skip orig_eax/error_code |
542 | irq_return: | 544 | irq_return: |
@@ -549,6 +551,7 @@ ENTRY(iret_exc) | |||
549 | .previous | 551 | .previous |
550 | _ASM_EXTABLE(irq_return,iret_exc) | 552 | _ASM_EXTABLE(irq_return,iret_exc) |
551 | 553 | ||
554 | #ifdef CONFIG_X86_ESPFIX32 | ||
552 | CFI_RESTORE_STATE | 555 | CFI_RESTORE_STATE |
553 | ldt_ss: | 556 | ldt_ss: |
554 | #ifdef CONFIG_PARAVIRT | 557 | #ifdef CONFIG_PARAVIRT |
@@ -592,6 +595,7 @@ ldt_ss: | |||
592 | lss (%esp), %esp /* switch to espfix segment */ | 595 | lss (%esp), %esp /* switch to espfix segment */ |
593 | CFI_ADJUST_CFA_OFFSET -8 | 596 | CFI_ADJUST_CFA_OFFSET -8 |
594 | jmp restore_nocheck | 597 | jmp restore_nocheck |
598 | #endif | ||
595 | CFI_ENDPROC | 599 | CFI_ENDPROC |
596 | ENDPROC(system_call) | 600 | ENDPROC(system_call) |
597 | 601 | ||
@@ -699,6 +703,7 @@ END(syscall_badsys) | |||
699 | * the high word of the segment base from the GDT and switches to the | 703 | * the high word of the segment base from the GDT and switches to the |
700 | * normal stack and adjusts ESP with the matching offset. | 704 | * normal stack and adjusts ESP with the matching offset. |
701 | */ | 705 | */ |
706 | #ifdef CONFIG_X86_ESPFIX32 | ||
702 | /* fixup the stack */ | 707 | /* fixup the stack */ |
703 | mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ | 708 | mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ |
704 | mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ | 709 | mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ |
@@ -708,8 +713,10 @@ END(syscall_badsys) | |||
708 | pushl_cfi %eax | 713 | pushl_cfi %eax |
709 | lss (%esp), %esp /* switch to the normal stack segment */ | 714 | lss (%esp), %esp /* switch to the normal stack segment */ |
710 | CFI_ADJUST_CFA_OFFSET -8 | 715 | CFI_ADJUST_CFA_OFFSET -8 |
716 | #endif | ||
711 | .endm | 717 | .endm |
712 | .macro UNWIND_ESPFIX_STACK | 718 | .macro UNWIND_ESPFIX_STACK |
719 | #ifdef CONFIG_X86_ESPFIX32 | ||
713 | movl %ss, %eax | 720 | movl %ss, %eax |
714 | /* see if on espfix stack */ | 721 | /* see if on espfix stack */ |
715 | cmpw $__ESPFIX_SS, %ax | 722 | cmpw $__ESPFIX_SS, %ax |
@@ -720,6 +727,7 @@ END(syscall_badsys) | |||
720 | /* switch to normal stack */ | 727 | /* switch to normal stack */ |
721 | FIXUP_ESPFIX_STACK | 728 | FIXUP_ESPFIX_STACK |
722 | 27: | 729 | 27: |
730 | #endif | ||
723 | .endm | 731 | .endm |
724 | 732 | ||
725 | /* | 733 | /* |
@@ -1350,11 +1358,13 @@ END(debug) | |||
1350 | ENTRY(nmi) | 1358 | ENTRY(nmi) |
1351 | RING0_INT_FRAME | 1359 | RING0_INT_FRAME |
1352 | ASM_CLAC | 1360 | ASM_CLAC |
1361 | #ifdef CONFIG_X86_ESPFIX32 | ||
1353 | pushl_cfi %eax | 1362 | pushl_cfi %eax |
1354 | movl %ss, %eax | 1363 | movl %ss, %eax |
1355 | cmpw $__ESPFIX_SS, %ax | 1364 | cmpw $__ESPFIX_SS, %ax |
1356 | popl_cfi %eax | 1365 | popl_cfi %eax |
1357 | je nmi_espfix_stack | 1366 | je nmi_espfix_stack |
1367 | #endif | ||
1358 | cmpl $ia32_sysenter_target,(%esp) | 1368 | cmpl $ia32_sysenter_target,(%esp) |
1359 | je nmi_stack_fixup | 1369 | je nmi_stack_fixup |
1360 | pushl_cfi %eax | 1370 | pushl_cfi %eax |
@@ -1394,6 +1404,7 @@ nmi_debug_stack_check: | |||
1394 | FIX_STACK 24, nmi_stack_correct, 1 | 1404 | FIX_STACK 24, nmi_stack_correct, 1 |
1395 | jmp nmi_stack_correct | 1405 | jmp nmi_stack_correct |
1396 | 1406 | ||
1407 | #ifdef CONFIG_X86_ESPFIX32 | ||
1397 | nmi_espfix_stack: | 1408 | nmi_espfix_stack: |
1398 | /* We have a RING0_INT_FRAME here. | 1409 | /* We have a RING0_INT_FRAME here. |
1399 | * | 1410 | * |
@@ -1415,6 +1426,7 @@ nmi_espfix_stack: | |||
1415 | lss 12+4(%esp), %esp # back to espfix stack | 1426 | lss 12+4(%esp), %esp # back to espfix stack |
1416 | CFI_ADJUST_CFA_OFFSET -24 | 1427 | CFI_ADJUST_CFA_OFFSET -24 |
1417 | jmp irq_return | 1428 | jmp irq_return |
1429 | #endif | ||
1418 | CFI_ENDPROC | 1430 | CFI_ENDPROC |
1419 | END(nmi) | 1431 | END(nmi) |
1420 | 1432 | ||
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index bffaa986cafc..da0b9bdcc32e 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -1045,8 +1045,10 @@ irq_return: | |||
1045 | * Are we returning to a stack segment from the LDT? Note: in | 1045 | * Are we returning to a stack segment from the LDT? Note: in |
1046 | * 64-bit mode SS:RSP on the exception stack is always valid. | 1046 | * 64-bit mode SS:RSP on the exception stack is always valid. |
1047 | */ | 1047 | */ |
1048 | #ifdef CONFIG_X86_ESPFIX64 | ||
1048 | testb $4,(SS-RIP)(%rsp) | 1049 | testb $4,(SS-RIP)(%rsp) |
1049 | jnz irq_return_ldt | 1050 | jnz irq_return_ldt |
1051 | #endif | ||
1050 | 1052 | ||
1051 | irq_return_iret: | 1053 | irq_return_iret: |
1052 | INTERRUPT_RETURN | 1054 | INTERRUPT_RETURN |
@@ -1058,6 +1060,7 @@ ENTRY(native_iret) | |||
1058 | _ASM_EXTABLE(native_iret, bad_iret) | 1060 | _ASM_EXTABLE(native_iret, bad_iret) |
1059 | #endif | 1061 | #endif |
1060 | 1062 | ||
1063 | #ifdef CONFIG_X86_ESPFIX64 | ||
1061 | irq_return_ldt: | 1064 | irq_return_ldt: |
1062 | pushq_cfi %rax | 1065 | pushq_cfi %rax |
1063 | pushq_cfi %rdi | 1066 | pushq_cfi %rdi |
@@ -1081,6 +1084,7 @@ irq_return_ldt: | |||
1081 | movq %rax,%rsp | 1084 | movq %rax,%rsp |
1082 | popq_cfi %rax | 1085 | popq_cfi %rax |
1083 | jmp irq_return_iret | 1086 | jmp irq_return_iret |
1087 | #endif | ||
1084 | 1088 | ||
1085 | .section .fixup,"ax" | 1089 | .section .fixup,"ax" |
1086 | bad_iret: | 1090 | bad_iret: |
@@ -1152,6 +1156,7 @@ END(common_interrupt) | |||
1152 | * modify the stack to make it look like we just entered | 1156 | * modify the stack to make it look like we just entered |
1153 | * the #GP handler from user space, similar to bad_iret. | 1157 | * the #GP handler from user space, similar to bad_iret. |
1154 | */ | 1158 | */ |
1159 | #ifdef CONFIG_X86_ESPFIX64 | ||
1155 | ALIGN | 1160 | ALIGN |
1156 | __do_double_fault: | 1161 | __do_double_fault: |
1157 | XCPT_FRAME 1 RDI+8 | 1162 | XCPT_FRAME 1 RDI+8 |
@@ -1177,6 +1182,9 @@ __do_double_fault: | |||
1177 | retq | 1182 | retq |
1178 | CFI_ENDPROC | 1183 | CFI_ENDPROC |
1179 | END(__do_double_fault) | 1184 | END(__do_double_fault) |
1185 | #else | ||
1186 | # define __do_double_fault do_double_fault | ||
1187 | #endif | ||
1180 | 1188 | ||
1181 | /* | 1189 | /* |
1182 | * End of kprobes section | 1190 | * End of kprobes section |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index ebc987398923..c37886d759cc 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -229,6 +229,11 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) | |||
229 | } | 229 | } |
230 | } | 230 | } |
231 | 231 | ||
232 | if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { | ||
233 | error = -EINVAL; | ||
234 | goto out_unlock; | ||
235 | } | ||
236 | |||
232 | fill_ldt(&ldt, &ldt_info); | 237 | fill_ldt(&ldt, &ldt_info); |
233 | if (oldmode) | 238 | if (oldmode) |
234 | ldt.avl = 0; | 239 | ldt.avl = 0; |