author     Avi Kivity <avi@redhat.com>  2012-06-07 07:07:48 -0400
committer  Avi Kivity <avi@redhat.com>  2012-07-09 07:19:00 -0400
commit     62046e5a867cbff35e0beff42718dda41ff5d74b (patch)
tree       310b84d9c0d3428d5fcb452bff02c0f00d6dab37  /arch/x86/kvm/cpuid.c
parent     d881e6f6cffe3993245963143cab2528f918e071 (diff)
KVM: Split cpuid register access from computation

Introduce kvm_cpuid() to perform the leaf limit check and calculate
register values, and let kvm_emulate_cpuid() just handle reading and
writing the registers from/to the vcpu.  This allows us to reuse
kvm_cpuid() in a context where directly reading and writing registers
is not desired.

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/cpuid.c')
-rw-r--r--  arch/x86/kvm/cpuid.c  40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 61ccbdf3d0ac..197afd53e3a4 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -640,33 +640,37 @@ static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
 	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
 }
 
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
 {
-	u32 function, index;
+	u32 function = *eax, index = *ecx;
 	struct kvm_cpuid_entry2 *best;
 
-	function = kvm_register_read(vcpu, VCPU_REGS_RAX);
-	index = kvm_register_read(vcpu, VCPU_REGS_RCX);
-	kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
-	kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
-	kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
-	kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
 	best = kvm_find_cpuid_entry(vcpu, function, index);
 
 	if (!best)
 		best = check_cpuid_limit(vcpu, function, index);
 
 	if (best) {
-		kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
-		kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
-		kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
-		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
-	}
+		*eax = best->eax;
+		*ebx = best->ebx;
+		*ecx = best->ecx;
+		*edx = best->edx;
+	} else
+		*eax = *ebx = *ecx = *edx = 0;
+}
+
+void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+{
+	u32 function, eax, ebx, ecx, edx;
+
+	function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
+	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
+	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
+	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
+	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	trace_kvm_cpuid(function,
-			kvm_register_read(vcpu, VCPU_REGS_RAX),
-			kvm_register_read(vcpu, VCPU_REGS_RBX),
-			kvm_register_read(vcpu, VCPU_REGS_RCX),
-			kvm_register_read(vcpu, VCPU_REGS_RDX));
+	trace_kvm_cpuid(function, eax, ebx, ecx, edx);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);