author     Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>   2013-06-07 04:51:25 -0400
committer  Gleb Natapov <gleb@redhat.com>                      2013-06-27 07:20:17 -0400
commit     b37fbea6cefc3a8ff7b6cfec9867432d1a10046d
tree       ff8d604eeeb37e1356fd2b5760955f850f870b6f   /arch/x86/kvm/mmu.c
parent     f2fd125d32822ee32779551a70d256a7c27dbe40
KVM: MMU: make return value of mmio page fault handler more readable
Define some meaningful names instead of raw return codes.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  15
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5d9a1fb108f5..476d155834b9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3224,17 +3224,12 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
 	return spte;
 }
 
-/*
- * If it is a real mmio page fault, return 1 and emulat the instruction
- * directly, return 0 to let CPU fault again on the address, -1 is
- * returned if bug is detected.
- */
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	u64 spte;
 
 	if (quickly_check_mmio_pf(vcpu, addr, direct))
-		return 1;
+		return RET_MMIO_PF_EMULATE;
 
 	spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
 
@@ -3247,7 +3242,7 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 
 		trace_handle_mmio_page_fault(addr, gfn, access);
 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
-		return 1;
+		return RET_MMIO_PF_EMULATE;
 	}
 
 	/*
@@ -3255,13 +3250,13 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 	 * it's a BUG if the gfn is not a mmio page.
 	 */
 	if (direct && !check_direct_spte_mmio_pf(spte))
-		return -1;
+		return RET_MMIO_PF_BUG;
 
 	/*
 	 * If the page table is zapped by other cpus, let CPU fault again on
 	 * the address.
 	 */
-	return 0;
+	return RET_MMIO_PF_RETRY;
 }
 EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
 
@@ -3271,7 +3266,7 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
 	int ret;
 
 	ret = handle_mmio_page_fault_common(vcpu, addr, direct);
-	WARN_ON(ret < 0);
+	WARN_ON(ret == RET_MMIO_PF_BUG);
 	return ret;
 }
 
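Because the view above is limited to arch/x86/kvm/mmu.c, the definitions of the new return values do not appear in this diff. Going by the raw codes they replace (1, 0 and -1) and the deleted comment, the companion change to the MMU header presumably introduces an enum along these lines (a sketch of the assumed definition, not the verbatim hunk; the file location, arch/x86/kvm/mmu.h, is also an assumption):

/*
 * Return values of handle_mmio_page_fault_common:
 * RET_MMIO_PF_EMULATE: it is a real MMIO page fault, emulate the instruction
 *                      directly.
 * RET_MMIO_PF_RETRY:   let the CPU fault again on the address.
 * RET_MMIO_PF_BUG:     a bug was detected.
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};

With values like these, the new WARN_ON(ret == RET_MMIO_PF_BUG) in handle_mmio_page_fault() stays equivalent to the old WARN_ON(ret < 0), since RET_MMIO_PF_BUG is the only negative code, and existing callers that compare the result numerically keep working unchanged.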