 arch/ia64/kvm/kvm-ia64.c | 14 ++++++++------
 arch/x86/kvm/mmu.c       |  2 +-
 arch/x86/kvm/x86.c       |  8 ++++++++
 include/linux/kvm.h      |  2 ++
 virt/kvm/kvm_main.c      | 11 +++++------
 5 files changed, 24 insertions(+), 13 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 28af6a731bb8..d20a5db4c4dd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -610,20 +610,22 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int r;
 
 again:
-	preempt_disable();
-	local_irq_disable();
-
 	if (signal_pending(current)) {
-		local_irq_enable();
-		preempt_enable();
 		r = -EINTR;
 		kvm_run->exit_reason = KVM_EXIT_INTR;
 		goto out;
 	}
 
+	/*
+	 * down_read() may sleep and return with interrupts enabled
+	 */
+	down_read(&vcpu->kvm->slots_lock);
+
+	preempt_disable();
+	local_irq_disable();
+
 	vcpu->guest_mode = 1;
 	kvm_guest_enter();
-	down_read(&vcpu->kvm->slots_lock);
 	r = vti_vcpu_run(vcpu, kvm_run);
 	if (r < 0) {
 		local_irq_enable();
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a36f7f7c4c7..b6caf1329b1b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1248,7 +1248,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
-	sp->global = role.cr4_pge;
+	sp->global = 0;
 	hlist_add_head(&sp->hash_link, bucket);
 	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8ca100a9ecac..7c1ce5ac6131 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2775,6 +2775,9 @@ out:
 
 void kvm_arch_exit(void)
 {
+	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
+					    CPUFREQ_TRANSITION_NOTIFIER);
 	kvm_x86_ops = NULL;
 	kvm_mmu_module_exit();
 }
@@ -4159,6 +4162,11 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+	if (vcpu->arch.time_page) {
+		kvm_release_page_dirty(vcpu->arch.time_page);
+		vcpu->arch.time_page = NULL;
+	}
+
 	kvm_x86_ops->vcpu_free(vcpu);
 }
 
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 311a073afe8a..8cc137911b34 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -409,6 +409,8 @@ struct kvm_trace_rec {
 #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
 #define KVM_CAP_DEVICE_DEASSIGNMENT 27
 #endif
+/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
+#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 605697e9c4dd..1ecbe2391c8b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -920,6 +920,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	int r;
 	gfn_t base_gfn;
 	unsigned long npages;
+	int largepages;
 	unsigned long i;
 	struct kvm_memory_slot *memslot;
 	struct kvm_memory_slot old, new;
@@ -960,7 +961,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
 		struct kvm_memory_slot *s = &kvm->memslots[i];
 
-		if (s == memslot)
+		if (s == memslot || !s->npages)
 			continue;
 		if (!((base_gfn + npages <= s->base_gfn) ||
 		      (base_gfn >= s->base_gfn + s->npages)))
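[Note, not part of the patch: a deleted slot keeps a stale base_gfn while its npages drops to 0, and without the added !s->npages test such a slot whose stale base_gfn lies inside the new region is reported as an overlap, which is what broke replacing adjacent regions with one joined region. A minimal user-space sketch of the arithmetic; the helper and values below are made up for illustration.]

	/* Illustration only -- not kernel code. */
	#include <stdio.h>

	struct slot { unsigned long base_gfn, npages; };

	/* Overlap test as in __kvm_set_memory_region, optionally skipping
	 * empty (deleted) slots as the new code does. */
	static int overlaps(unsigned long base_gfn, unsigned long npages,
			    const struct slot *s, int skip_empty)
	{
		if (skip_empty && !s->npages)
			return 0;
		return !((base_gfn + npages <= s->base_gfn) ||
			 (base_gfn >= s->base_gfn + s->npages));
	}

	int main(void)
	{
		/* A slot that was deleted: npages is 0, base_gfn is stale. */
		struct slot deleted = { .base_gfn = 0x100, .npages = 0 };
		/* New, larger region covering the old one. */
		unsigned long base_gfn = 0x0, npages = 0x200;

		printf("old check: %d\n", overlaps(base_gfn, npages, &deleted, 0)); /* 1: spurious overlap */
		printf("new check: %d\n", overlaps(base_gfn, npages, &deleted, 1)); /* 0 */
		return 0;
	}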
@@ -995,11 +996,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		new.userspace_addr = 0;
 	}
 	if (npages && !new.lpage_info) {
-		int largepages = npages / KVM_PAGES_PER_HPAGE;
-		if (npages % KVM_PAGES_PER_HPAGE)
-			largepages++;
-		if (base_gfn % KVM_PAGES_PER_HPAGE)
-			largepages++;
+		largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
+		largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
 
 		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
 
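[Note, not part of the patch: the new formula counts exactly the huge-page frames the slot touches (index of the last frame minus index of the first, plus one), whereas the old arithmetic could over-count when a slot starts mid huge page without crossing a boundary. A user-space sketch, assuming KVM_PAGES_PER_HPAGE is 512, i.e. x86 with 4 KiB pages and 2 MiB huge pages:]

	/* Illustration only: compare the old and new largepages calculations. */
	#include <stdio.h>

	#define KVM_PAGES_PER_HPAGE 512UL	/* assumed value for this sketch */

	static unsigned long old_count(unsigned long base_gfn, unsigned long npages)
	{
		unsigned long largepages = npages / KVM_PAGES_PER_HPAGE;

		if (npages % KVM_PAGES_PER_HPAGE)
			largepages++;
		if (base_gfn % KVM_PAGES_PER_HPAGE)
			largepages++;
		return largepages;
	}

	static unsigned long new_count(unsigned long base_gfn, unsigned long npages)
	{
		unsigned long largepages;

		largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
		largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
		return largepages;
	}

	int main(void)
	{
		/* Small slot starting mid huge page, not crossing a boundary:
		 * it spans one huge-page frame, yet the old code sized the
		 * lpage_info array for two. */
		unsigned long base_gfn = 1, npages = 16;

		printf("old: %lu, new: %lu\n",
		       old_count(base_gfn, npages), new_count(base_gfn, npages));
		return 0;
	}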
@@ -1985,6 +1983,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
 	switch (arg) {
 	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
 		return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	case KVM_CAP_IRQ_ROUTING:
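[Note, not part of the patch: with the capability both defined in include/linux/kvm.h and reported by kvm_dev_ioctl_check_extension_generic, user space can probe for it with the standard KVM_CHECK_EXTENSION ioctl on /dev/kvm. A minimal sketch:]

	/* Illustration only: probe KVM_CAP_JOIN_MEMORY_REGIONS_WORKS from user space. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		if (kvm < 0) {
			perror("open /dev/kvm");
			return 1;
		}

		/* KVM_CHECK_EXTENSION returns a positive value when the
		 * capability is supported, 0 otherwise. */
		int ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
		printf("KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: %s\n",
		       ret > 0 ? "supported" : "not supported");

		close(kvm);
		return 0;
	}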