author     David S. Miller <davem@davemloft.net>  2010-01-23 03:31:06 -0500
committer  David S. Miller <davem@davemloft.net>  2010-01-23 03:31:06 -0500
commit     51c24aaacaea90c8e87f1dec75a2ac7622b593f8 (patch)
tree       9f54936c87764bef75e97395cb56b7d1e0df24c6 /arch/x86/kvm
parent     4276e47e2d1c85a2477caf0d22b91c4f2377fba8 (diff)
parent     6be325719b3e54624397e413efd4b33a997e55a3 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/lapic.c        1
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  18
-rw-r--r--  arch/x86/kvm/x86.c          12
3 files changed, 9 insertions, 22 deletions
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cd60c0bd1b32..3063a0c4858b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1150,6 +1150,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 	hrtimer_cancel(&apic->lapic_timer.timer);
 	update_divide_count(apic);
 	start_apic_timer(apic);
+	apic->irr_pending = true;
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
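The lapic.c hunk sits on the LAPIC save/restore path: kvm_apic_post_state_restore() is reached via the KVM_SET_LAPIC ioctl handler, and the added line conservatively marks the IRR as possibly non-empty so interrupt bits carried in the restored register page are re-scanned. A minimal userspace sketch of that path, assuming vcpu fds obtained via KVM_CREATE_VCPU (illustrative only, error handling trimmed):

/*
 * Illustrative sketch, not part of the patch: copy LAPIC state between
 * two vcpu fds with KVM_GET_LAPIC/KVM_SET_LAPIC, the path that ends up
 * in kvm_apic_post_state_restore().
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int migrate_lapic(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_lapic_state lapic;

	if (ioctl(src_vcpu_fd, KVM_GET_LAPIC, &lapic) < 0)
		return -1;
	/* With the hunk above, the restore leaves irr_pending set, so any
	 * IRR bits in the saved register page are noticed again. */
	return ioctl(dst_vcpu_fd, KVM_SET_LAPIC, &lapic);
}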
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a6017132fba8..58a0f1e88596 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (level == PT_PAGE_TABLE_LEVEL ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
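For context, FNAME(invlpg) above is the shadow-MMU handler that runs when a guest invalidates a TLB entry while the host uses shadow paging; after these hunks it only zaps the matching shadow PTE and flushes, rather than also re-reading the guest PTE and prefetching it back via kvm_mmu_pte_write. A guest-side sketch of the instruction that reaches this handler (illustrative only, hypothetical helper name):

/* Guest-side sketch: the instruction whose intercept lands in
 * FNAME(invlpg) on a shadow-paging host. Illustrative only. */
static inline void guest_invlpg(const void *addr)
{
	asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}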
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d068966fb2a..6651dbf58675 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
-	events->flags = 0;
+	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
+			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
 
 	vcpu_put(vcpu);
 }
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
-	if (events->flags)
+	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
+			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
 		return -EINVAL;
 
 	vcpu_load(vcpu);
@@ -1938,10 +1940,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		kvm_pic_clear_isr_ack(vcpu->kvm);
 
 	vcpu->arch.nmi_injected = events->nmi.injected;
-	vcpu->arch.nmi_pending = events->nmi.pending;
+	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
+		vcpu->arch.nmi_pending = events->nmi.pending;
 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-	vcpu->arch.sipi_vector = events->sipi_vector;
+	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
+		vcpu->arch.sipi_vector = events->sipi_vector;
 
 	vcpu_put(vcpu);
 
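The x86.c hunks make KVM_GET_VCPU_EVENTS report which fields it filled in and make KVM_SET_VCPU_EVENTS apply the NMI-pending and SIPI-vector fields only when their validity flag is set, so userspace can update one field without clobbering the other. A minimal userspace sketch under those semantics, assuming a vcpu fd (illustrative only, error handling trimmed):

/*
 * Illustrative sketch: restore only the SIPI vector through the
 * flag-validated KVM_SET_VCPU_EVENTS interface introduced above,
 * leaving the in-kernel nmi_pending state untouched.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_sipi_vector_only(int vcpu_fd, unsigned int vector)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	events.sipi_vector = vector;
	/* Mark only the SIPI vector valid; nmi.pending is ignored by the
	 * kernel because its flag bit is not set. */
	events.flags = KVM_VCPUEVENT_VALID_SIPI_VECTOR;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}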