diff options
author | Avi Kivity <avi@redhat.com> | 2010-01-21 08:31:51 -0500 |
---|---|---|
committer | Marcelo Tosatti <mtosatti@redhat.com> | 2010-03-01 10:36:05 -0500 |
commit | 8ae099127668de80e4babba73bb492740ce3a1be (patch) | |
tree | 2485d31b421b83edac00161fb32b5ee049655254 /arch | |
parent | f6801dff23bd1902473902194667f4ac1eb6ea26 (diff) |
KVM: Optimize kvm_read_cr[04]_bits()
'mask' is always a constant, so we can check whether it includes a bit that
might be owned by the guest very cheaply, and avoid the decache call. Saves
a few hundred bytes of module text.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/kvm_cache_regs.h | 11 |
1 file changed, 9 insertions, 2 deletions
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 6b419a36cbd9..cff851cf5322 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h | |||
@@ -1,6 +1,11 @@ | |||
1 | #ifndef ASM_KVM_CACHE_REGS_H | 1 | #ifndef ASM_KVM_CACHE_REGS_H |
2 | #define ASM_KVM_CACHE_REGS_H | 2 | #define ASM_KVM_CACHE_REGS_H |
3 | 3 | ||
4 | #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS | ||
5 | #define KVM_POSSIBLE_CR4_GUEST_BITS \ | ||
6 | (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ | ||
7 | | X86_CR4_OSXMMEXCPT | X86_CR4_PGE) | ||
8 | |||
4 | static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, | 9 | static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, |
5 | enum kvm_reg reg) | 10 | enum kvm_reg reg) |
6 | { | 11 | { |
@@ -40,7 +45,8 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) | |||
40 | 45 | ||
41 | static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) | 46 | static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) |
42 | { | 47 | { |
43 | if (mask & vcpu->arch.cr0_guest_owned_bits) | 48 | ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS; |
49 | if (tmask & vcpu->arch.cr0_guest_owned_bits) | ||
44 | kvm_x86_ops->decache_cr0_guest_bits(vcpu); | 50 | kvm_x86_ops->decache_cr0_guest_bits(vcpu); |
45 | return vcpu->arch.cr0 & mask; | 51 | return vcpu->arch.cr0 & mask; |
46 | } | 52 | } |
@@ -52,7 +58,8 @@ static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu) | |||
52 | 58 | ||
53 | static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) | 59 | static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) |
54 | { | 60 | { |
55 | if (mask & vcpu->arch.cr4_guest_owned_bits) | 61 | ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS; |
62 | if (tmask & vcpu->arch.cr4_guest_owned_bits) | ||
56 | kvm_x86_ops->decache_cr4_guest_bits(vcpu); | 63 | kvm_x86_ops->decache_cr4_guest_bits(vcpu); |
57 | return vcpu->arch.cr4 & mask; | 64 | return vcpu->arch.cr4 & mask; |
58 | } | 65 | } |