author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-30 15:56:17 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-30 15:56:17 -0500
commit    b07d41b77e58baa2df2326cec68dde03cb2348c5 (patch)
tree      80bf63803e52c4930dd0118046ab3d2f074d529f /arch/x86
parent    8fa4d8702de3b32854344635667290776d1a754b (diff)
parent    b4329db0d7fd5a233866e799ad3bae8639e90c71 (diff)
Merge branch 'kvm-updates/2.6.33' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.33' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: get rid of kvm_create_vm() unused label warning on s390
  KVM: powerpc: Fix mtsrin in book3s_64 mmu
  KVM: ia64: fix build breakage due to host spinlock change
  KVM: x86: Extend KVM_SET_VCPU_EVENTS with selective updates
  KVM: LAPIC: make sure IRR bitmap is scanned after vm load
  KVM: Fix possible circular locking in kvm_vm_ioctl_assign_device()
  KVM: MMU: remove prefault from invlpg handler
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm.h  |  4
-rw-r--r--  arch/x86/kvm/lapic.c        |  1
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  | 18
-rw-r--r--  arch/x86/kvm/x86.c          | 12
4 files changed, 13 insertions(+), 22 deletions(-)
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 950df434763f..f46b79f6c16c 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
 	__u8 reserved[31];
 };
 
+/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
+#define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
+
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
 	struct {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cd60c0bd1b32..3063a0c4858b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1150,6 +1150,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 	hrtimer_cancel(&apic->lapic_timer.timer);
 	update_divide_count(apic);
 	start_apic_timer(apic);
+	apic->irr_pending = true;
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
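The one-line lapic.c change above restores an invariant after state restore. Below is a minimal sketch of the fast path that irr_pending guards, under the assumption that the LAPIC code skips scanning the IRR bitmap when the hint is clear; the names example_lapic and example_find_highest_irr are hypothetical stand-ins, not kernel code.

/*
 * Illustrative only, not kernel source: assume irr_pending is a fast-path
 * hint meaning "at least one bit may be set in the IRR bitmap".  A restored
 * LAPIC can already have IRR bits set, so the restore path must also set
 * the hint or the scan below is never reached and the interrupt is lost.
 */
#include <stdbool.h>

#define EXAMPLE_IRR_WORDS 8			/* 256 vectors, 32 bits per word */

struct example_lapic {
	bool irr_pending;			/* the hint set by the fix above */
	unsigned int irr[EXAMPLE_IRR_WORDS];	/* stand-in for the real IRR */
};

/* Return the highest pending vector, or -1 if none. */
int example_find_highest_irr(const struct example_lapic *apic)
{
	int word, bit;

	if (!apic->irr_pending)
		return -1;			/* fast path: skip the bitmap scan */

	for (word = EXAMPLE_IRR_WORDS - 1; word >= 0; word--)
		for (bit = 31; bit >= 0; bit--)
			if (apic->irr[word] & (1u << bit))
				return word * 32 + bit;
	return -1;
}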
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a6017132fba8..58a0f1e88596 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (level == PT_PAGE_TABLE_LEVEL ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d068966fb2a..6651dbf58675 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
-	events->flags = 0;
+	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
+			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
 
 	vcpu_put(vcpu);
 }
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
-	if (events->flags)
+	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
+			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
 		return -EINVAL;
 
 	vcpu_load(vcpu);
@@ -1938,10 +1940,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		kvm_pic_clear_isr_ack(vcpu->kvm);
 
 	vcpu->arch.nmi_injected = events->nmi.injected;
-	vcpu->arch.nmi_pending = events->nmi.pending;
+	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
+		vcpu->arch.nmi_pending = events->nmi.pending;
 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-	vcpu->arch.sipi_vector = events->sipi_vector;
+	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
+		vcpu->arch.sipi_vector = events->sipi_vector;
 
 	vcpu_put(vcpu);
 
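Taken together with the new KVM_VCPUEVENT_VALID_* flags declared in arch/x86/include/asm/kvm.h above, the x86.c changes let userspace update nmi_pending and sipi_vector selectively. Below is a hedged usage sketch from the userspace side, assuming an open vCPU file descriptor obtained via KVM_CREATE_VCPU; the helper name set_nmi_pending_only is hypothetical and error handling is minimal.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch: update only the NMI-pending bit of a vCPU's event state while
 * leaving sipi_vector untouched.  Fields not covered by a VALID flag are
 * always written by the kernel, so the current state is read back first.
 */
int set_nmi_pending_only(int vcpu_fd, __u8 pending)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	events.nmi.pending = pending;
	/* sipi_vector is left alone because its VALID bit is not set. */
	events.flags = KVM_VCPUEVENT_VALID_NMI_PENDING;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}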