aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/vmx.c
diff options
context:
space:
mode:
authorNadav Har'El <nyh@il.ibm.com>2011-05-25 16:08:30 -0400
committerAvi Kivity <avi@redhat.com>2011-07-12 04:45:14 -0400
commit49f705c5324aa13bb5623b392c23996e23eabc23 (patch)
tree3b9ad512eb7d63f1d7a2b60d2671a963c632397c /arch/x86/kvm/vmx.c
parent6a4d7550601b5b17df227959bdbec208384f729c (diff)
KVM: nVMX: Implement VMREAD and VMWRITE
Implement the VMREAD and VMWRITE instructions. With these instructions, L1 can read and write to the VMCS it is holding. The values are read or written to the fields of the vmcs12 structure introduced in a previous patch. Signed-off-by: Nadav Har'El <nyh@il.ibm.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--arch/x86/kvm/vmx.c193
1 file changed, 191 insertions, 2 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2bc521c9dabe..84d9c93fde05 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4877,6 +4877,195 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
4877 return 1; 4877 return 1;
4878} 4878}
4879 4879
4880enum vmcs_field_type {
4881 VMCS_FIELD_TYPE_U16 = 0,
4882 VMCS_FIELD_TYPE_U64 = 1,
4883 VMCS_FIELD_TYPE_U32 = 2,
4884 VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
4885};
4886
4887static inline int vmcs_field_type(unsigned long field)
4888{
4889 if (0x1 & field) /* the *_HIGH fields are all 32 bit */
4890 return VMCS_FIELD_TYPE_U32;
4891 return (field >> 13) & 0x3 ;
4892}
4893
4894static inline int vmcs_field_readonly(unsigned long field)
4895{
4896 return (((field >> 10) & 0x3) == 1);
4897}
4898
4899/*
4900 * Read a vmcs12 field. Since these can have varying lengths and we return
4901 * one type, we chose the biggest type (u64) and zero-extend the return value
4902 * to that size. Note that the caller, handle_vmread, might need to use only
4903 * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
4904 * 64-bit fields are to be returned).
4905 */
4906static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
4907 unsigned long field, u64 *ret)
4908{
4909 short offset = vmcs_field_to_offset(field);
4910 char *p;
4911
4912 if (offset < 0)
4913 return 0;
4914
4915 p = ((char *)(get_vmcs12(vcpu))) + offset;
4916
4917 switch (vmcs_field_type(field)) {
4918 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
4919 *ret = *((natural_width *)p);
4920 return 1;
4921 case VMCS_FIELD_TYPE_U16:
4922 *ret = *((u16 *)p);
4923 return 1;
4924 case VMCS_FIELD_TYPE_U32:
4925 *ret = *((u32 *)p);
4926 return 1;
4927 case VMCS_FIELD_TYPE_U64:
4928 *ret = *((u64 *)p);
4929 return 1;
4930 default:
4931 return 0; /* can never happen. */
4932 }
4933}
4934
4935/*
4936 * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
4937 * used before) all generate the same failure when it is missing.
4938 */
4939static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
4940{
4941 struct vcpu_vmx *vmx = to_vmx(vcpu);
4942 if (vmx->nested.current_vmptr == -1ull) {
4943 nested_vmx_failInvalid(vcpu);
4944 skip_emulated_instruction(vcpu);
4945 return 0;
4946 }
4947 return 1;
4948}
4949
4950static int handle_vmread(struct kvm_vcpu *vcpu)
4951{
4952 unsigned long field;
4953 u64 field_value;
4954 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4955 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4956 gva_t gva = 0;
4957
4958 if (!nested_vmx_check_permission(vcpu) ||
4959 !nested_vmx_check_vmcs12(vcpu))
4960 return 1;
4961
4962 /* Decode instruction info and find the field to read */
4963 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4964 /* Read the field, zero-extended to a u64 field_value */
4965 if (!vmcs12_read_any(vcpu, field, &field_value)) {
4966 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4967 skip_emulated_instruction(vcpu);
4968 return 1;
4969 }
4970 /*
4971 * Now copy part of this value to register or memory, as requested.
4972 * Note that the number of bits actually copied is 32 or 64 depending
4973 * on the guest's mode (32 or 64 bit), not on the given field's length.
4974 */
4975 if (vmx_instruction_info & (1u << 10)) {
4976 kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
4977 field_value);
4978 } else {
4979 if (get_vmx_mem_address(vcpu, exit_qualification,
4980 vmx_instruction_info, &gva))
4981 return 1;
4982 /* _system ok, as nested_vmx_check_permission verified cpl=0 */
4983 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
4984 &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
4985 }
4986
4987 nested_vmx_succeed(vcpu);
4988 skip_emulated_instruction(vcpu);
4989 return 1;
4990}
4991
4992
4993static int handle_vmwrite(struct kvm_vcpu *vcpu)
4994{
4995 unsigned long field;
4996 gva_t gva;
4997 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4998 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4999 char *p;
5000 short offset;
5001 /* The value to write might be 32 or 64 bits, depending on L1's long
5002 * mode, and eventually we need to write that into a field of several
5003 * possible lengths. The code below first zero-extends the value to 64
5004 * bit (field_value), and then copies only the approriate number of
5005 * bits into the vmcs12 field.
5006 */
5007 u64 field_value = 0;
5008 struct x86_exception e;
5009
5010 if (!nested_vmx_check_permission(vcpu) ||
5011 !nested_vmx_check_vmcs12(vcpu))
5012 return 1;
5013
5014 if (vmx_instruction_info & (1u << 10))
5015 field_value = kvm_register_read(vcpu,
5016 (((vmx_instruction_info) >> 3) & 0xf));
5017 else {
5018 if (get_vmx_mem_address(vcpu, exit_qualification,
5019 vmx_instruction_info, &gva))
5020 return 1;
5021 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
5022 &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
5023 kvm_inject_page_fault(vcpu, &e);
5024 return 1;
5025 }
5026 }
5027
5028
5029 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5030 if (vmcs_field_readonly(field)) {
5031 nested_vmx_failValid(vcpu,
5032 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
5033 skip_emulated_instruction(vcpu);
5034 return 1;
5035 }
5036
5037 offset = vmcs_field_to_offset(field);
5038 if (offset < 0) {
5039 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5040 skip_emulated_instruction(vcpu);
5041 return 1;
5042 }
5043 p = ((char *) get_vmcs12(vcpu)) + offset;
5044
5045 switch (vmcs_field_type(field)) {
5046 case VMCS_FIELD_TYPE_U16:
5047 *(u16 *)p = field_value;
5048 break;
5049 case VMCS_FIELD_TYPE_U32:
5050 *(u32 *)p = field_value;
5051 break;
5052 case VMCS_FIELD_TYPE_U64:
5053 *(u64 *)p = field_value;
5054 break;
5055 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
5056 *(natural_width *)p = field_value;
5057 break;
5058 default:
5059 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5060 skip_emulated_instruction(vcpu);
5061 return 1;
5062 }
5063
5064 nested_vmx_succeed(vcpu);
5065 skip_emulated_instruction(vcpu);
5066 return 1;
5067}
5068
4880/* Emulate the VMPTRLD instruction */ 5069/* Emulate the VMPTRLD instruction */
4881static int handle_vmptrld(struct kvm_vcpu *vcpu) 5070static int handle_vmptrld(struct kvm_vcpu *vcpu)
4882{ 5071{
@@ -4988,9 +5177,9 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
4988 [EXIT_REASON_VMLAUNCH] = handle_vmx_insn, 5177 [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
4989 [EXIT_REASON_VMPTRLD] = handle_vmptrld, 5178 [EXIT_REASON_VMPTRLD] = handle_vmptrld,
4990 [EXIT_REASON_VMPTRST] = handle_vmptrst, 5179 [EXIT_REASON_VMPTRST] = handle_vmptrst,
4991 [EXIT_REASON_VMREAD] = handle_vmx_insn, 5180 [EXIT_REASON_VMREAD] = handle_vmread,
4992 [EXIT_REASON_VMRESUME] = handle_vmx_insn, 5181 [EXIT_REASON_VMRESUME] = handle_vmx_insn,
4993 [EXIT_REASON_VMWRITE] = handle_vmx_insn, 5182 [EXIT_REASON_VMWRITE] = handle_vmwrite,
4994 [EXIT_REASON_VMOFF] = handle_vmoff, 5183 [EXIT_REASON_VMOFF] = handle_vmoff,
4995 [EXIT_REASON_VMON] = handle_vmon, 5184 [EXIT_REASON_VMON] = handle_vmon,
4996 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, 5185 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,