author	Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>	2012-02-07 12:49:20 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-08 07:10:11 -0500
commit	10166744b80a41c30d82bc6e11140f5b28d257ab
tree	53b362f77d16ed91d8ecf373f7106fde12027ab7 /arch/x86
parent	e26101b116a6235bcd80b3a4c38c9fe91286cd79
KVM: VMX: remove yield_on_hlt
yield_on_hlt was introduced for CPU bandwidth capping. Now it is
redundant with the CFS hard limit.

yield_on_hlt also complicates the scenario in a paravirtual environment
that needs to trap halt, e.g. for paravirtualized ticket spinlocks.
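
For context (not part of this patch): once CPU_BASED_HLT_EXITING is always in
the required execution controls, every guest HLT takes a VM exit and reaches
the HLT exit handler. The sketch below is a simplified rendering of that
handler as it exists in arch/x86/kvm/vmx.c of this era, shown only to
illustrate why trapping halt unconditionally is useful; it is not code added
by this patch.

	/* Every guest HLT now causes a VM exit and lands here. */
	static int handle_halt(struct kvm_vcpu *vcpu)
	{
		/* The HLT has architecturally executed; advance RIP past it. */
		skip_emulated_instruction(vcpu);
		/* Mark the vCPU halted so the host can schedule other work
		 * until an interrupt (or, later, a pv-spinlock kick) wakes it. */
		return kvm_emulate_halt(vcpu);
	}

With halt always trapped, CPU bandwidth capping is left to the CFS hard limit,
and a paravirtualized ticket-spinlock slow path can rely on HLT reaching the
hypervisor.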
Acked-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/vmx.c	22
1 file changed, 1 insertion(+), 21 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 575fb742a6fc..d2bd719925a6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -70,9 +70,6 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 static bool __read_mostly vmm_exclusive = 1;
 module_param(vmm_exclusive, bool, S_IRUGO);
 
-static bool __read_mostly yield_on_hlt = 1;
-module_param(yield_on_hlt, bool, S_IRUGO);
-
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
@@ -1655,17 +1652,6 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	vmx_set_interrupt_shadow(vcpu, 0);
 }
 
-static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
-{
-	/* Ensure that we clear the HLT state in the VMCS. We don't need to
-	 * explicitly skip the instruction because if the HLT state is set, then
-	 * the instruction is already executing and RIP has already been
-	 * advanced. */
-	if (!yield_on_hlt &&
-	    vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
-		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
-}
-
 /*
  * KVM wants to inject page-faults which it got to the guest. This function
  * checks whether in a nested guest, we need to inject them to L1 or L2.
@@ -1718,7 +1704,6 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 		intr_info |= INTR_TYPE_HARD_EXCEPTION;
 
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
-	vmx_clear_hlt(vcpu);
 }
 
 static bool vmx_rdtscp_supported(void)
@@ -2405,7 +2390,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 				&_pin_based_exec_control) < 0)
 		return -EIO;
 
-	min =
+	min = CPU_BASED_HLT_EXITING |
 #ifdef CONFIG_X86_64
 	      CPU_BASED_CR8_LOAD_EXITING |
 	      CPU_BASED_CR8_STORE_EXITING |
@@ -2420,9 +2405,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	      CPU_BASED_INVLPG_EXITING |
 	      CPU_BASED_RDPMC_EXITING;
 
-	if (yield_on_hlt)
-		min |= CPU_BASED_HLT_EXITING;
-
 	opt = CPU_BASED_TPR_SHADOW |
 	      CPU_BASED_USE_MSR_BITMAPS |
 	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -4009,7 +3991,6 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
 	} else
 		intr |= INTR_TYPE_EXT_INTR;
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
-	vmx_clear_hlt(vcpu);
 }
 
 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
@@ -4041,7 +4022,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 	}
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
-	vmx_clear_hlt(vcpu);
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)