diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-03-25 12:06:19 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-03-25 12:06:19 -0400 |
| commit | e584152571d1535a3d27138b91e0ece3b713dd6b (patch) | |
| tree | 00fd227c4cbf9f97fbe5a9bf0db8011d8594f317 | |
| parent | 7ed7fe5e82c9fc8473974fbd7389d169b8f17c77 (diff) | |
| parent | e48bb497b95a0f7127f9ff596a6b4c4b206f7dcf (diff) | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
KVM: MMU: Fix memory leak on guest demand faults
KVM: VMX: convert init_rmode_tss() to slots_lock
KVM: MMU: handle page removal with shadow mapping
KVM: MMU: Fix is_rmap_pte() with io ptes
KVM: VMX: Restore tss even on x86_64
| -rw-r--r-- | arch/x86/kvm/mmu.c | 18 | ||||
| -rw-r--r-- | arch/x86/kvm/vmx.c | 7 |
2 files changed, 16 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d8172aabc660..e55af12e11b7 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -222,8 +222,7 @@ static int is_io_pte(unsigned long pte) | |||
| 222 | 222 | ||
| 223 | static int is_rmap_pte(u64 pte) | 223 | static int is_rmap_pte(u64 pte) |
| 224 | { | 224 | { |
| 225 | return pte != shadow_trap_nonpresent_pte | 225 | return is_shadow_present_pte(pte); |
| 226 | && pte != shadow_notrap_nonpresent_pte; | ||
| 227 | } | 226 | } |
| 228 | 227 | ||
| 229 | static gfn_t pse36_gfn_delta(u32 gpte) | 228 | static gfn_t pse36_gfn_delta(u32 gpte) |
| @@ -893,14 +892,25 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | |||
| 893 | int *ptwrite, gfn_t gfn, struct page *page) | 892 | int *ptwrite, gfn_t gfn, struct page *page) |
| 894 | { | 893 | { |
| 895 | u64 spte; | 894 | u64 spte; |
| 896 | int was_rmapped = is_rmap_pte(*shadow_pte); | 895 | int was_rmapped = 0; |
| 897 | int was_writeble = is_writeble_pte(*shadow_pte); | 896 | int was_writeble = is_writeble_pte(*shadow_pte); |
| 897 | hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; | ||
| 898 | 898 | ||
| 899 | pgprintk("%s: spte %llx access %x write_fault %d" | 899 | pgprintk("%s: spte %llx access %x write_fault %d" |
| 900 | " user_fault %d gfn %lx\n", | 900 | " user_fault %d gfn %lx\n", |
| 901 | __FUNCTION__, *shadow_pte, pt_access, | 901 | __FUNCTION__, *shadow_pte, pt_access, |
| 902 | write_fault, user_fault, gfn); | 902 | write_fault, user_fault, gfn); |
| 903 | 903 | ||
| 904 | if (is_rmap_pte(*shadow_pte)) { | ||
| 905 | if (host_pfn != page_to_pfn(page)) { | ||
| 906 | pgprintk("hfn old %lx new %lx\n", | ||
| 907 | host_pfn, page_to_pfn(page)); | ||
| 908 | rmap_remove(vcpu->kvm, shadow_pte); | ||
| 909 | } | ||
| 910 | else | ||
| 911 | was_rmapped = 1; | ||
| 912 | } | ||
| 913 | |||
| 904 | /* | 914 | /* |
| 905 | * We don't set the accessed bit, since we sometimes want to see | 915 | * We don't set the accessed bit, since we sometimes want to see |
| 906 | * whether the guest actually used the pte (in order to detect | 916 | * whether the guest actually used the pte (in order to detect |
| @@ -1402,7 +1412,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
| 1402 | up_read(&current->mm->mmap_sem); | 1412 | up_read(&current->mm->mmap_sem); |
| 1403 | 1413 | ||
| 1404 | vcpu->arch.update_pte.gfn = gfn; | 1414 | vcpu->arch.update_pte.gfn = gfn; |
| 1405 | vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn); | 1415 | vcpu->arch.update_pte.page = page; |
| 1406 | } | 1416 | } |
| 1407 | 1417 | ||
| 1408 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | 1418 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 94ea724638fd..8e1462880d1f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -349,8 +349,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) | |||
| 349 | 349 | ||
| 350 | static void reload_tss(void) | 350 | static void reload_tss(void) |
| 351 | { | 351 | { |
| 352 | #ifndef CONFIG_X86_64 | ||
| 353 | |||
| 354 | /* | 352 | /* |
| 355 | * VT restores TR but not its size. Useless. | 353 | * VT restores TR but not its size. Useless. |
| 356 | */ | 354 | */ |
| @@ -361,7 +359,6 @@ static void reload_tss(void) | |||
| 361 | descs = (void *)gdt.base; | 359 | descs = (void *)gdt.base; |
| 362 | descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ | 360 | descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ |
| 363 | load_TR_desc(); | 361 | load_TR_desc(); |
| 364 | #endif | ||
| 365 | } | 362 | } |
| 366 | 363 | ||
| 367 | static void load_transition_efer(struct vcpu_vmx *vmx) | 364 | static void load_transition_efer(struct vcpu_vmx *vmx) |
| @@ -1436,7 +1433,7 @@ static int init_rmode_tss(struct kvm *kvm) | |||
| 1436 | int ret = 0; | 1433 | int ret = 0; |
| 1437 | int r; | 1434 | int r; |
| 1438 | 1435 | ||
| 1439 | down_read(&current->mm->mmap_sem); | 1436 | down_read(&kvm->slots_lock); |
| 1440 | r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); | 1437 | r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); |
| 1441 | if (r < 0) | 1438 | if (r < 0) |
| 1442 | goto out; | 1439 | goto out; |
| @@ -1459,7 +1456,7 @@ static int init_rmode_tss(struct kvm *kvm) | |||
| 1459 | 1456 | ||
| 1460 | ret = 1; | 1457 | ret = 1; |
| 1461 | out: | 1458 | out: |
| 1462 | up_read(&current->mm->mmap_sem); | 1459 | up_read(&kvm->slots_lock); |
| 1463 | return ret; | 1460 | return ret; |
| 1464 | } | 1461 | } |
| 1465 | 1462 | ||
