diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2010-09-10 11:30:53 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-10-24 04:52:38 -0400 |
commit | 3d06b8bfd44ec421c386241f7c5af66c8200cbf4 (patch) | |
tree | 3b9c13bbc974a711db264d60384e869c94f8a6cd /arch | |
parent | 2329d46d213d0721dafae18db29f54b196f11468 (diff) |
KVM: MMU: Introduce kvm_read_nested_guest_page()
This patch introduces the kvm_read_nested_guest_page function
which reads from the physical memory of the guest. If the
guest is running in guest-mode itself with nested paging
enabled it will read from the guest's guest physical memory
instead.
The patch also changes the code to use this function
where it is necessary.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/x86.c | 19 |
1 files changed, 16 insertions, 3 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a2efb70f4cc8..46843ed36dc1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -392,6 +392,13 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, | |||
392 | } | 392 | } |
393 | EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); | 393 | EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); |
394 | 394 | ||
395 | int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, | ||
396 | void *data, int offset, int len, u32 access) | ||
397 | { | ||
398 | return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, | ||
399 | data, offset, len, access); | ||
400 | } | ||
401 | |||
395 | /* | 402 | /* |
396 | * Load the pae pdptrs. Return true is they are all valid. | 403 | * Load the pae pdptrs. Return true is they are all valid. |
397 | */ | 404 | */ |
@@ -403,8 +410,9 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
403 | int ret; | 410 | int ret; |
404 | u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; | 411 | u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; |
405 | 412 | ||
406 | ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte, | 413 | ret = kvm_read_nested_guest_page(vcpu, pdpt_gfn, pdpte, |
407 | offset * sizeof(u64), sizeof(pdpte)); | 414 | offset * sizeof(u64), sizeof(pdpte), |
415 | PFERR_USER_MASK|PFERR_WRITE_MASK); | ||
408 | if (ret < 0) { | 416 | if (ret < 0) { |
409 | ret = 0; | 417 | ret = 0; |
410 | goto out; | 418 | goto out; |
@@ -433,6 +441,8 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu) | |||
433 | { | 441 | { |
434 | u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; | 442 | u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; |
435 | bool changed = true; | 443 | bool changed = true; |
444 | int offset; | ||
445 | gfn_t gfn; | ||
436 | int r; | 446 | int r; |
437 | 447 | ||
438 | if (is_long_mode(vcpu) || !is_pae(vcpu)) | 448 | if (is_long_mode(vcpu) || !is_pae(vcpu)) |
@@ -442,7 +452,10 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu) | |||
442 | (unsigned long *)&vcpu->arch.regs_avail)) | 452 | (unsigned long *)&vcpu->arch.regs_avail)) |
443 | return true; | 453 | return true; |
444 | 454 | ||
445 | r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte)); | 455 | gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT; |
456 | offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1); | ||
457 | r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), | ||
458 | PFERR_USER_MASK | PFERR_WRITE_MASK); | ||
446 | if (r < 0) | 459 | if (r < 0) |
447 | goto out; | 460 | goto out; |
448 | changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0; | 461 | changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0; |