about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPeter Feiner <pfeiner@google.com>2018-08-01 14:06:57 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-08-03 06:36:23 -0400
commit301d328a6f8b53bb86c5ecf72db7bc178bcf1999 (patch)
tree4022ecc1e96a6309fb722de89c57a770a260f7de
parentcc9aec03e58fea4dbab04c05d1e15852f801ca53 (diff)
x86/cpufeatures: Add EPT_AD feature bit
Some Intel processors have an EPT feature whereby the accessed & dirty bits in EPT entries can be updated by HW. MSR IA32_VMX_EPT_VPID_CAP exposes the presence of this capability.

There is no point in trying to use that new feature bit in the VMX code as VMX needs to read the MSR anyway to access other bits, but having the feature bit for EPT_AD in place helps virtualization management as it exposes "ept_ad" in /proc/cpuinfo/$proc/flags if the feature is present.

[ tglx: Amended changelog ]

Signed-off-by: Peter Feiner <pfeiner@google.com>
Signed-off-by: Peter Shier <pshier@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Jim Mattson <jmattson@google.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lkml.kernel.org/r/20180801180657.138051-1-pshier@google.com
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/kernel/cpu/intel.c10
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 5701f5cecd31..7fff98fa5855 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -229,7 +229,7 @@
 
 #define X86_FEATURE_VMMCALL		( 8*32+15) /* Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV		( 8*32+16) /* "" Xen paravirtual guest */
-
+#define X86_FEATURE_EPT_AD		( 8*32+17) /* Intel Extended Page Table access-dirty bit */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE		( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index eb75564f2d25..c050cd6066af 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -465,14 +465,17 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
 #define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
 #define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020
+#define x86_VMX_FEATURE_EPT_CAP_AD		0x00200000
 
 	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
+	u32 msr_vpid_cap, msr_ept_cap;
 
 	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
 	clear_cpu_cap(c, X86_FEATURE_VNMI);
 	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
 	clear_cpu_cap(c, X86_FEATURE_EPT);
 	clear_cpu_cap(c, X86_FEATURE_VPID);
+	clear_cpu_cap(c, X86_FEATURE_EPT_AD);
 
 	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
 	msr_ctl = vmx_msr_high | vmx_msr_low;
@@ -487,8 +490,13 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 	if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
 	    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
 		set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
-	if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
+	if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) {
 		set_cpu_cap(c, X86_FEATURE_EPT);
+		rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
+		      msr_ept_cap, msr_vpid_cap);
+		if (msr_ept_cap & x86_VMX_FEATURE_EPT_CAP_AD)
+			set_cpu_cap(c, X86_FEATURE_EPT_AD);
+	}
 	if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
 		set_cpu_cap(c, X86_FEATURE_VPID);
 }