path: root/arch/x86
author    Linus Torvalds <torvalds@linux-foundation.org>  2013-07-02 19:21:45 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-07-02 19:21:45 -0400
commit    002e44bfb596665178cbf83586faeb8349ea6b9b (patch)
tree      a33b3f3a3b8709d0ba32c57a7ec62d018bd528af /arch/x86
parent    e13053f50664d3d614bbc9b8c83abdad979ac7c9 (diff)
parent    a3d7b7dddcc38c19aa46509c7282e8def80384a8 (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull asm/x86 changes from Ingo Molnar:
 "Misc changes, with a bigger processor-flags cleanup/reorganization by
  Peter Anvin"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, asm, cleanup: Replace open-coded control register values with symbolic
  x86, processor-flags: Fix the datatypes and add bit number defines
  x86: Rename X86_CR4_RDWRGSFS to X86_CR4_FSGSBASE
  x86, flags: Rename X86_EFLAGS_BIT1 to X86_EFLAGS_FIXED
  linux/const.h: Add _BITUL() and _BITULL()
  x86/vdso: Convert use of typedef ctl_table to struct ctl_table
  x86: __force_order doesn't need to be an actual variable
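For reference, a minimal sketch of the _BITUL()/_BITULL() helpers that the new processor-flags.h relies on, built on the pre-existing _AC() macro in <linux/const.h>; the exact definitions are assumed here, not quoted from that patch:

/* Sketch only: _AC() appends the UL/ULL suffix in C but drops it when the
 * header is included from assembler, so the same define works in .S files. */
#ifdef __ASSEMBLY__
#define _AC(X,Y)	X
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)
#endif

#define _BITUL(x)	(_AC(1,UL) << (x))
#define _BITULL(x)	(_AC(1,ULL) << (x))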
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h             |   2
-rw-r--r--  arch/x86/include/asm/special_insns.h        |   2
-rw-r--r--  arch/x86/include/uapi/asm/processor-flags.h | 154
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cyrix.c            |   2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c          |   2
-rw-r--r--  arch/x86/kernel/entry_64.S                  |   2
-rw-r--r--  arch/x86/kernel/process_32.c                |   2
-rw-r--r--  arch/x86/kernel/process_64.c                |   2
-rw-r--r--  arch/x86/kernel/relocate_kernel_32.S        |   2
-rw-r--r--  arch/x86/kvm/vmx.c                          |   2
-rw-r--r--  arch/x86/kvm/x86.c                          |   2
-rw-r--r--  arch/x86/vdso/vdso32-setup.c                |   4
12 files changed, 116 insertions, 62 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3741c653767c..af9c5525434d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -59,7 +59,7 @@
 	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
-			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
+			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
 			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 41fc93a2e225..2f4d924fe6c9 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -16,7 +16,7 @@ static inline void native_clts(void)
  * all loads stores around it, which can hurt performance. Solution is to
  * use a variable and mimic reads and writes to it to enforce serialization
  */
-static unsigned long __force_order;
+extern unsigned long __force_order;

 static inline unsigned long native_read_cr0(void)
 {
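For context, a sketch of how __force_order is used by the CR accessors in this file: the dummy memory operand keeps the compiler from reordering control-register reads and writes, and making the symbol extern avoids emitting one copy per translation unit. Illustrative only, not part of this hunk:

extern unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	/* "=m" (__force_order) is a fake output that orders this asm */
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	/* "m" (__force_order) is a fake input serving the same purpose */
	asm volatile("mov %0,%%cr0" : : "r" (val), "m" (__force_order));
}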
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index 54991a746043..180a0c3c224d 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -2,75 +2,129 @@
 #define _UAPI_ASM_X86_PROCESSOR_FLAGS_H
 /* Various flags defined: can be included from assembler. */

+#include <linux/const.h>
+
 /*
  * EFLAGS bits
  */
-#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
-#define X86_EFLAGS_BIT1	0x00000002 /* Bit 1 - always on */
-#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
-#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
+#define X86_EFLAGS_CF_BIT	0 /* Carry Flag */
+#define X86_EFLAGS_CF		_BITUL(X86_EFLAGS_CF_BIT)
+#define X86_EFLAGS_FIXED_BIT	1 /* Bit 1 - always on */
+#define X86_EFLAGS_FIXED	_BITUL(X86_EFLAGS_FIXED_BIT)
+#define X86_EFLAGS_PF_BIT	2 /* Parity Flag */
+#define X86_EFLAGS_PF		_BITUL(X86_EFLAGS_PF_BIT)
+#define X86_EFLAGS_AF_BIT	4 /* Auxiliary carry Flag */
+#define X86_EFLAGS_AF		_BITUL(X86_EFLAGS_AF_BIT)
+#define X86_EFLAGS_ZF_BIT	6 /* Zero Flag */
+#define X86_EFLAGS_ZF		_BITUL(X86_EFLAGS_ZF_BIT)
+#define X86_EFLAGS_SF_BIT	7 /* Sign Flag */
+#define X86_EFLAGS_SF		_BITUL(X86_EFLAGS_SF_BIT)
+#define X86_EFLAGS_TF_BIT	8 /* Trap Flag */
+#define X86_EFLAGS_TF		_BITUL(X86_EFLAGS_TF_BIT)
+#define X86_EFLAGS_IF_BIT	9 /* Interrupt Flag */
+#define X86_EFLAGS_IF		_BITUL(X86_EFLAGS_IF_BIT)
+#define X86_EFLAGS_DF_BIT	10 /* Direction Flag */
+#define X86_EFLAGS_DF		_BITUL(X86_EFLAGS_DF_BIT)
+#define X86_EFLAGS_OF_BIT	11 /* Overflow Flag */
+#define X86_EFLAGS_OF		_BITUL(X86_EFLAGS_OF_BIT)
+#define X86_EFLAGS_IOPL_BIT	12 /* I/O Privilege Level (2 bits) */
+#define X86_EFLAGS_IOPL		(_AC(3,UL) << X86_EFLAGS_IOPL_BIT)
+#define X86_EFLAGS_NT_BIT	14 /* Nested Task */
+#define X86_EFLAGS_NT		_BITUL(X86_EFLAGS_NT_BIT)
+#define X86_EFLAGS_RF_BIT	16 /* Resume Flag */
+#define X86_EFLAGS_RF		_BITUL(X86_EFLAGS_RF_BIT)
+#define X86_EFLAGS_VM_BIT	17 /* Virtual Mode */
+#define X86_EFLAGS_VM		_BITUL(X86_EFLAGS_VM_BIT)
+#define X86_EFLAGS_AC_BIT	18 /* Alignment Check/Access Control */
+#define X86_EFLAGS_AC		_BITUL(X86_EFLAGS_AC_BIT)
+#define X86_EFLAGS_AC_BIT	18 /* Alignment Check/Access Control */
+#define X86_EFLAGS_AC		_BITUL(X86_EFLAGS_AC_BIT)
+#define X86_EFLAGS_VIF_BIT	19 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIF		_BITUL(X86_EFLAGS_VIF_BIT)
+#define X86_EFLAGS_VIP_BIT	20 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_VIP		_BITUL(X86_EFLAGS_VIP_BIT)
+#define X86_EFLAGS_ID_BIT	21 /* CPUID detection */
+#define X86_EFLAGS_ID		_BITUL(X86_EFLAGS_ID_BIT)

 /*
  * Basic CPU control in CR0
  */
-#define X86_CR0_PE	0x00000001 /* Protection Enable */
-#define X86_CR0_MP	0x00000002 /* Monitor Coprocessor */
-#define X86_CR0_EM	0x00000004 /* Emulation */
-#define X86_CR0_TS	0x00000008 /* Task Switched */
-#define X86_CR0_ET	0x00000010 /* Extension Type */
-#define X86_CR0_NE	0x00000020 /* Numeric Error */
-#define X86_CR0_WP	0x00010000 /* Write Protect */
-#define X86_CR0_AM	0x00040000 /* Alignment Mask */
-#define X86_CR0_NW	0x20000000 /* Not Write-through */
-#define X86_CR0_CD	0x40000000 /* Cache Disable */
-#define X86_CR0_PG	0x80000000 /* Paging */
+#define X86_CR0_PE_BIT		0 /* Protection Enable */
+#define X86_CR0_PE		_BITUL(X86_CR0_PE_BIT)
+#define X86_CR0_MP_BIT		1 /* Monitor Coprocessor */
+#define X86_CR0_MP		_BITUL(X86_CR0_MP_BIT)
+#define X86_CR0_EM_BIT		2 /* Emulation */
+#define X86_CR0_EM		_BITUL(X86_CR0_EM_BIT)
+#define X86_CR0_TS_BIT		3 /* Task Switched */
+#define X86_CR0_TS		_BITUL(X86_CR0_TS_BIT)
+#define X86_CR0_ET_BIT		4 /* Extension Type */
+#define X86_CR0_ET		_BITUL(X86_CR0_ET_BIT)
+#define X86_CR0_NE_BIT		5 /* Numeric Error */
+#define X86_CR0_NE		_BITUL(X86_CR0_NE_BIT)
+#define X86_CR0_WP_BIT		16 /* Write Protect */
+#define X86_CR0_WP		_BITUL(X86_CR0_WP_BIT)
+#define X86_CR0_AM_BIT		18 /* Alignment Mask */
+#define X86_CR0_AM		_BITUL(X86_CR0_AM_BIT)
+#define X86_CR0_NW_BIT		29 /* Not Write-through */
+#define X86_CR0_NW		_BITUL(X86_CR0_NW_BIT)
+#define X86_CR0_CD_BIT		30 /* Cache Disable */
+#define X86_CR0_CD		_BITUL(X86_CR0_CD_BIT)
+#define X86_CR0_PG_BIT		31 /* Paging */
+#define X86_CR0_PG		_BITUL(X86_CR0_PG_BIT)

 /*
  * Paging options in CR3
  */
-#define X86_CR3_PWT	0x00000008 /* Page Write Through */
-#define X86_CR3_PCD	0x00000010 /* Page Cache Disable */
-#define X86_CR3_PCID_MASK 0x00000fff /* PCID Mask */
+#define X86_CR3_PWT_BIT		3 /* Page Write Through */
+#define X86_CR3_PWT		_BITUL(X86_CR3_PWT_BIT)
+#define X86_CR3_PCD_BIT		4 /* Page Cache Disable */
+#define X86_CR3_PCD		_BITUL(X86_CR3_PCD_BIT)
+#define X86_CR3_PCID_MASK	_AC(0x00000fff,UL) /* PCID Mask */

 /*
  * Intel CPU features in CR4
  */
-#define X86_CR4_VME	0x00000001 /* enable vm86 extensions */
-#define X86_CR4_PVI	0x00000002 /* virtual interrupts flag enable */
-#define X86_CR4_TSD	0x00000004 /* disable time stamp at ipl 3 */
-#define X86_CR4_DE	0x00000008 /* enable debugging extensions */
-#define X86_CR4_PSE	0x00000010 /* enable page size extensions */
-#define X86_CR4_PAE	0x00000020 /* enable physical address extensions */
-#define X86_CR4_MCE	0x00000040 /* Machine check enable */
-#define X86_CR4_PGE	0x00000080 /* enable global pages */
-#define X86_CR4_PCE	0x00000100 /* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR	0x00000200 /* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
-#define X86_CR4_VMXE	0x00002000 /* enable VMX virtualization */
-#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
-#define X86_CR4_PCIDE	0x00020000 /* enable PCID support */
-#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
-#define X86_CR4_SMEP	0x00100000 /* enable SMEP support */
-#define X86_CR4_SMAP	0x00200000 /* enable SMAP support */
+#define X86_CR4_VME_BIT		0 /* enable vm86 extensions */
+#define X86_CR4_VME		_BITUL(X86_CR4_VME_BIT)
+#define X86_CR4_PVI_BIT		1 /* virtual interrupts flag enable */
+#define X86_CR4_PVI		_BITUL(X86_CR4_PVI_BIT)
+#define X86_CR4_TSD_BIT		2 /* disable time stamp at ipl 3 */
+#define X86_CR4_TSD		_BITUL(X86_CR4_TSD_BIT)
+#define X86_CR4_DE_BIT		3 /* enable debugging extensions */
+#define X86_CR4_DE		_BITUL(X86_CR4_DE_BIT)
+#define X86_CR4_PSE_BIT		4 /* enable page size extensions */
+#define X86_CR4_PSE		_BITUL(X86_CR4_PSE_BIT)
+#define X86_CR4_PAE_BIT		5 /* enable physical address extensions */
+#define X86_CR4_PAE		_BITUL(X86_CR4_PAE_BIT)
+#define X86_CR4_MCE_BIT		6 /* Machine check enable */
+#define X86_CR4_MCE		_BITUL(X86_CR4_MCE_BIT)
+#define X86_CR4_PGE_BIT		7 /* enable global pages */
+#define X86_CR4_PGE		_BITUL(X86_CR4_PGE_BIT)
+#define X86_CR4_PCE_BIT		8 /* enable performance counters at ipl 3 */
+#define X86_CR4_PCE		_BITUL(X86_CR4_PCE_BIT)
+#define X86_CR4_OSFXSR_BIT	9 /* enable fast FPU save and restore */
+#define X86_CR4_OSFXSR		_BITUL(X86_CR4_OSFXSR_BIT)
+#define X86_CR4_OSXMMEXCPT_BIT	10 /* enable unmasked SSE exceptions */
+#define X86_CR4_OSXMMEXCPT	_BITUL(X86_CR4_OSXMMEXCPT_BIT)
+#define X86_CR4_VMXE_BIT	13 /* enable VMX virtualization */
+#define X86_CR4_VMXE		_BITUL(X86_CR4_VMXE_BIT)
+#define X86_CR4_SMXE_BIT	14 /* enable safer mode (TXT) */
+#define X86_CR4_SMXE		_BITUL(X86_CR4_SMXE_BIT)
+#define X86_CR4_FSGSBASE_BIT	16 /* enable RDWRFSGS support */
+#define X86_CR4_FSGSBASE	_BITUL(X86_CR4_FSGSBASE_BIT)
+#define X86_CR4_PCIDE_BIT	17 /* enable PCID support */
+#define X86_CR4_PCIDE		_BITUL(X86_CR4_PCIDE_BIT)
+#define X86_CR4_OSXSAVE_BIT	18 /* enable xsave and xrestore */
+#define X86_CR4_OSXSAVE		_BITUL(X86_CR4_OSXSAVE_BIT)
+#define X86_CR4_SMEP_BIT	20 /* enable SMEP support */
+#define X86_CR4_SMEP		_BITUL(X86_CR4_SMEP_BIT)
+#define X86_CR4_SMAP_BIT	21 /* enable SMAP support */
+#define X86_CR4_SMAP		_BITUL(X86_CR4_SMAP_BIT)

 /*
  * x86-64 Task Priority Register, CR8
  */
-#define X86_CR8_TPR	0x0000000F /* task priority register */
+#define X86_CR8_TPR		_AC(0x0000000f,UL) /* task priority register */

 /*
  * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
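A quick illustrative check (not part of the patch) that the rewritten defines keep their old numeric values, assuming _BITUL(x) expands to (1UL << (x)):

#define _BITUL(x)		(1UL << (x))	/* assumed expansion */
#define X86_EFLAGS_FIXED_BIT	1
#define X86_EFLAGS_FIXED	_BITUL(X86_EFLAGS_FIXED_BIT)
#define X86_EFLAGS_IOPL_BIT	12
#define X86_EFLAGS_IOPL		(3UL << X86_EFLAGS_IOPL_BIT)

_Static_assert(X86_EFLAGS_FIXED == 0x00000002UL, "matches old X86_EFLAGS_BIT1");
_Static_assert(X86_EFLAGS_IOPL  == 0x00003000UL, "matches old IOPL mask");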
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index 68a3343e5798..9e451b0876b5 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -167,7 +167,7 @@ static void post_set(void)
 	setCx86(CX86_CCR3, ccr3);

 	/* Enable caches */
-	write_cr0(read_cr0() & 0xbfffffff);
+	write_cr0(read_cr0() & ~X86_CR0_CD);

 	/* Restore value of CR4 */
 	if (cpu_has_pge)
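The old magic number and the new symbolic form are the same mask; an illustrative check (CD is bit 30, so clearing it gives the 0xbfffffff the code used to spell out):

#define X86_CR0_CD	(1UL << 30)	/* Cache Disable */

_Static_assert((~X86_CR0_CD & 0xffffffffUL) == 0xbfffffffUL,
	       "~X86_CR0_CD is the old open-coded mask");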
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index fa72a39e5d46..00f557b95b16 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -701,7 +701,7 @@ static void post_set(void) __releases(set_atomicity_lock)
 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

 	/* Enable caches */
-	write_cr0(read_cr0() & 0xbfffffff);
+	write_cr0(read_cr0() & ~X86_CR0_CD);

 	/* Restore value of CR4 */
 	if (cpu_has_pge)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 727208941030..5fe1fb2d1490 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -365,7 +365,7 @@ ENDPROC(native_usergs_sysret64)
 	/*CFI_REL_OFFSET ss,0*/
 	pushq_cfi %rax /* rsp */
 	CFI_REL_OFFSET rsp,0
-	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */
+	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
 	/*CFI_REL_OFFSET rflags,0*/
 	pushq_cfi $__KERNEL_CS /* cs */
 	/*CFI_REL_OFFSET cs,0*/
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 7305f7dfc7ab..0339f5c14bf9 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -147,7 +147,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	childregs->bp = arg;
 	childregs->orig_ax = -1;
 	childregs->cs = __KERNEL_CS | get_kernel_rpl();
-	childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
+	childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
 	p->fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 355ae06dbf94..f99a242730e9 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -176,7 +176,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	childregs->bp = arg;
 	childregs->orig_ax = -1;
 	childregs->cs = __KERNEL_CS | get_kernel_rpl();
-	childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
+	childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
 	return 0;
 }
 *childregs = *current_pt_regs();
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index 36818f8ec2be..e13f8e7c22a6 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -186,7 +186,7 @@ identity_mapped:
 	movl	CP_PA_PGD(%ebx), %eax
 	movl	%eax, %cr3
 	movl	%cr0, %eax
-	orl	$(1<<31), %eax
+	orl	$X86_CR0_PG, %eax
 	movl	%eax, %cr0
 	lea	PAGE_SIZE(%edi), %esp
 	movl	%edi, %eax
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 260a91939555..b30f5a54a2ab 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7942,7 +7942,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,

 	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
 	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
-	vmx_set_rflags(vcpu, X86_EFLAGS_BIT1);
+	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
 	/*
 	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
 	 * actually changed, because it depends on the current state of
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e8ba99c34180..292e6ca89f42 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -618,7 +618,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
 		return 1;

-	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
+	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
 		return 1;

 	if (is_long_mode(vcpu)) {
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 0faad646f5fd..d6bfb876cfb0 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -372,7 +372,7 @@ subsys_initcall(sysenter_setup);
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>

-static ctl_table abi_table2[] = {
+static struct ctl_table abi_table2[] = {
 	{
 		.procname	= "vsyscall32",
 		.data		= &sysctl_vsyscall32,
@@ -383,7 +383,7 @@ static ctl_table abi_table2[] = {
 	{}
 };

-static ctl_table abi_root_table2[] = {
+static struct ctl_table abi_root_table2[] = {
 	{
 		.procname = "abi",
 		.mode = 0555,