diff options
author | Paul Mackerras <paulus@samba.org> | 2014-07-19 03:59:36 -0400 |
---|---|---|
committer | Alexander Graf <agraf@suse.de> | 2014-07-28 09:23:16 -0400 |
commit | 1b2e33b071b13980a1f0823fbf139242059697b4 (patch) | |
tree | b0fa41220231de99d8ceea2298ac29e009659043 | |
parent | ef1af2e29622ff3403926ae801a2b10da075a2de (diff) |
KVM: PPC: Book3S: Make kvmppc_ld return a more accurate error indication
At present, kvmppc_ld calls kvmppc_xlate, and if kvmppc_xlate returns
any error indication, it returns -ENOENT, which is taken to mean an
HPTE not found error. However, the error could have been a segment
fault (no SLB entry found) or a permission error. Similarly,
kvmppc_pte_to_hva currently does permission checking, but any error
from it is taken by kvmppc_ld to mean that the access is an emulated
MMIO access. Also, kvmppc_ld does no execute permission checking.
This fixes these problems by (a) returning any error from kvmppc_xlate
directly, (b) moving the permission check from kvmppc_pte_to_hva
into kvmppc_ld, and (c) adding an execute permission check to kvmppc_ld.
This is similar to what was done for kvmppc_st() by commit 82ff911317c3
("KVM: PPC: Deflect page write faults properly in kvmppc_st").
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
-rw-r--r-- | arch/powerpc/kvm/book3s.c | 25 |
1 file changed, 12 insertions, 13 deletions
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 37ca8a0897c3..a3cbada114bc 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c | |||
@@ -413,17 +413,10 @@ static hva_t kvmppc_bad_hva(void) | |||
413 | return PAGE_OFFSET; | 413 | return PAGE_OFFSET; |
414 | } | 414 | } |
415 | 415 | ||
416 | static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte, | 416 | static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) |
417 | bool read) | ||
418 | { | 417 | { |
419 | hva_t hpage; | 418 | hva_t hpage; |
420 | 419 | ||
421 | if (read && !pte->may_read) | ||
422 | goto err; | ||
423 | |||
424 | if (!read && !pte->may_write) | ||
425 | goto err; | ||
426 | |||
427 | hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT); | 420 | hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT); |
428 | if (kvm_is_error_hva(hpage)) | 421 | if (kvm_is_error_hva(hpage)) |
429 | goto err; | 422 | goto err; |
@@ -462,15 +455,23 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, | |||
462 | { | 455 | { |
463 | struct kvmppc_pte pte; | 456 | struct kvmppc_pte pte; |
464 | hva_t hva = *eaddr; | 457 | hva_t hva = *eaddr; |
458 | int rc; | ||
465 | 459 | ||
466 | vcpu->stat.ld++; | 460 | vcpu->stat.ld++; |
467 | 461 | ||
468 | if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte)) | 462 | rc = kvmppc_xlate(vcpu, *eaddr, data, false, &pte); |
469 | goto nopte; | 463 | if (rc) |
464 | return rc; | ||
470 | 465 | ||
471 | *eaddr = pte.raddr; | 466 | *eaddr = pte.raddr; |
472 | 467 | ||
473 | hva = kvmppc_pte_to_hva(vcpu, &pte, true); | 468 | if (!pte.may_read) |
469 | return -EPERM; | ||
470 | |||
471 | if (!data && !pte.may_execute) | ||
472 | return -ENOEXEC; | ||
473 | |||
474 | hva = kvmppc_pte_to_hva(vcpu, &pte); | ||
474 | if (kvm_is_error_hva(hva)) | 475 | if (kvm_is_error_hva(hva)) |
475 | goto mmio; | 476 | goto mmio; |
476 | 477 | ||
@@ -481,8 +482,6 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, | |||
481 | 482 | ||
482 | return EMULATE_DONE; | 483 | return EMULATE_DONE; |
483 | 484 | ||
484 | nopte: | ||
485 | return -ENOENT; | ||
486 | mmio: | 485 | mmio: |
487 | return EMULATE_DO_MMIO; | 486 | return EMULATE_DO_MMIO; |
488 | } | 487 | } |