Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/vgic.c      | 15
-rw-r--r--  virt/kvm/assigned-dev.c  |  3
-rw-r--r--  virt/kvm/async_pf.c      |  8
3 files changed, 14 insertions, 12 deletions
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 47b29834a6b6..56ff9bebb577 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
         u32 val;
         u32 *reg;
 
-        offset >>= 1;
         reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
-                                  vcpu->vcpu_id, offset);
+                                  vcpu->vcpu_id, offset >> 1);
 
-        if (offset & 2)
+        if (offset & 4)
                 val = *reg >> 16;
         else
                 val = *reg & 0xffff;
@@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
         vgic_reg_access(mmio, &val, offset,
                         ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
         if (mmio->is_write) {
-                if (offset < 4) {
+                if (offset < 8) {
                         *reg = ~0U;  /* Force PPIs/SGIs to 1 */
                         return false;
                 }
 
                 val = vgic_cfg_compress(val);
-                if (offset & 2) {
+                if (offset & 4) {
                         *reg &= 0xffff;
                         *reg |= val << 16;
                 } else {
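
Note on the two hunks above: GICD_ICFGRn exposes two configuration bits per interrupt (16 interrupts per 32-bit MMIO word), while the vgic keeps one bit per interrupt internally, so one internal word backs two MMIO words. The old code halved offset up front and then kept treating it as the MMIO byte offset; the fix keeps offset in MMIO bytes and only halves it for the internal lookup, which is why the halfword test becomes offset & 4 and the SGI/PPI window becomes the first 8 bytes (offset < 8, interrupts 0-31). Below is a minimal standalone sketch of that offset arithmetic only; the array and helper here are invented for illustration, not kernel code.

    /*
     * Sketch of the GICD_ICFGR offset math after the fix above.
     * internal_cfg[] and icfgr_read_halfword() are invented names;
     * only the offset arithmetic mirrors the patch.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t internal_cfg[8];        /* 1 bit per interrupt, demo storage */

    static uint16_t icfgr_read_halfword(uint32_t offset)
    {
            /* (offset >> 1) converts MMIO bytes to internal bytes, and
             * dividing by 4 again gives the internal word: offset >> 3.
             * offset & 4 picks the upper or lower halfword, as the fixed
             * handler does. */
            uint32_t word = internal_cfg[offset >> 3];

            return (offset & 4) ? (uint16_t)(word >> 16) : (uint16_t)(word & 0xffff);
    }

    int main(void)
    {
            internal_cfg[0] = 0xaaaa5555;

            /* MMIO offsets 0 and 4 are both backed by internal word 0. */
            printf("GICD_ICFGR0 (offset 0) -> %#x\n", (unsigned)icfgr_read_halfword(0));
            printf("GICD_ICFGR1 (offset 4) -> %#x\n", (unsigned)icfgr_read_halfword(4));
            return 0;
    }
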
@@ -916,6 +915,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
         case 0:
                 if (!target_cpus)
                         return;
+                break;
 
         case 1:
                 target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
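
The break added above matters because case 0 (deliver to the CPUs listed in the SGI register) otherwise falls through into case 1 and replaces target_cpus with the "all but self" mask. A throwaway sketch of that fall-through, using the same three GICv2 target-list-filter cases; the function is invented for illustration only.

    #include <stdio.h>

    static unsigned int pick_targets(unsigned int mode, unsigned int listed,
                                     unsigned int nrcpus, unsigned int self)
    {
            unsigned int target_cpus = listed;

            switch (mode) {
            case 0:                         /* send to the listed CPUs */
                    if (!target_cpus)
                            return 0;
                    break;                  /* without this: falls into case 1 */
            case 1:                         /* send to everyone but self */
                    target_cpus = ((1u << nrcpus) - 1) & ~(1u << self) & 0xff;
                    break;
            case 2:                         /* send only to self */
                    target_cpus = 1u << self;
                    break;
            }
            return target_cpus;
    }

    int main(void)
    {
            /* mode 0 with CPUs {1,2} listed must return exactly that list */
            printf("%#x\n", pick_targets(0, 0x6, 4, 0));    /* 0x6 */
            return 0;
    }
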
@@ -1667,10 +1667,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
         if (addr + size < addr)
                 return -EINVAL;
 
+        *ioaddr = addr;
         ret = vgic_ioaddr_overlap(kvm);
         if (ret)
-                return ret;
-        *ioaddr = addr;
+                *ioaddr = VGIC_ADDR_UNDEF;
+
         return ret;
 }
 
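This hunk reorders the assignment so the candidate base address is visible to vgic_ioaddr_overlap() while it runs, and is reset to VGIC_ADDR_UNDEF rather than left stale if the check fails. A self-contained sketch of that publish-then-roll-back pattern follows; the struct, the overlap test, and the region size are simplified stand-ins, not the kernel implementation.

    #include <stdint.h>
    #include <stdio.h>

    #define ADDR_UNDEF      ((uint64_t)-1)
    #define REGION_SIZE     0x1000

    struct demo_vgic {
            uint64_t dist_base;
            uint64_t cpu_base;
    };

    /* Returns non-zero if both regions are defined and collide. */
    static int regions_overlap(const struct demo_vgic *v)
    {
            if (v->dist_base == ADDR_UNDEF || v->cpu_base == ADDR_UNDEF)
                    return 0;               /* nothing to compare yet */
            if (v->dist_base + REGION_SIZE <= v->cpu_base ||
                v->cpu_base + REGION_SIZE <= v->dist_base)
                    return 0;
            return -1;
    }

    static int assign_addr(struct demo_vgic *v, uint64_t *slot, uint64_t addr)
    {
            int ret;

            *slot = addr;                   /* publish first ... */
            ret = regions_overlap(v);
            if (ret)
                    *slot = ADDR_UNDEF;     /* ... roll back on overlap */
            return ret;
    }

    int main(void)
    {
            struct demo_vgic v = { .dist_base = ADDR_UNDEF, .cpu_base = ADDR_UNDEF };

            printf("dist: %d\n", assign_addr(&v, &v.dist_base, 0x8000000));
            printf("cpu (overlaps): %d\n", assign_addr(&v, &v.cpu_base, 0x8000800));
            return 0;
    }
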
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 8db43701016f..bf06577fea51 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -395,7 +395,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
         if (dev->entries_nr == 0)
                 return r;
 
-        r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
+        r = pci_enable_msix_exact(dev->dev,
+                                  dev->host_msix_entries, dev->entries_nr);
         if (r)
                 return r;
 
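
pci_enable_msix_exact() returns only 0 or a negative errno, which is exactly what the unchanged "if (r) return r;" expects; the older pci_enable_msix() could also return a positive count of vectors that could have been allocated instead, and this conversion is part of moving callers off that interface. A standalone mock of the two return conventions (not the PCI core; names invented):

    #include <errno.h>
    #include <stdio.h>

    #define AVAILABLE_VECTORS 4

    /* old-style contract: 0, a negative errno, or a positive available count */
    static int mock_enable_msix(int nvec)
    {
            if (nvec > AVAILABLE_VECTORS)
                    return AVAILABLE_VECTORS;
            return 0;
    }

    /* exact contract: 0 on success, negative errno otherwise */
    static int mock_enable_msix_exact(int nvec)
    {
            if (nvec > AVAILABLE_VECTORS)
                    return -ENOSPC;
            return 0;
    }

    int main(void)
    {
            /* With the old contract, a return of 4 is not an errno, yet a
             * bare "if (r) return r;" would hand a positive value back up
             * as if it were an error code. */
            printf("old:   %d\n", mock_enable_msix(8));
            printf("exact: %d\n", mock_enable_msix_exact(8));
            return 0;
    }
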
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 10df100c4514..06e6401d6ef4 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -101,7 +101,7 @@ static void async_pf_execute(struct work_struct *work)
         if (waitqueue_active(&vcpu->wq))
                 wake_up_interruptible(&vcpu->wq);
 
-        mmdrop(mm);
+        mmput(mm);
         kvm_put_kvm(vcpu->kvm);
 }
 
@@ -118,7 +118,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                 flush_work(&work->work);
 #else
                 if (cancel_work_sync(&work->work)) {
-                        mmdrop(work->mm);
+                        mmput(work->mm);
                         kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                         kmem_cache_free(async_pf_cache, work);
                 }
@@ -183,7 +183,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
         work->addr = hva;
         work->arch = *arch;
         work->mm = current->mm;
-        atomic_inc(&work->mm->mm_count);
+        atomic_inc(&work->mm->mm_users);
         kvm_get_kvm(work->vcpu->kvm);
 
         /* this can't really happen otherwise gfn_to_pfn_async
@@ -201,7 +201,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
         return 1;
 retry_sync:
         kvm_put_kvm(work->vcpu->kvm);
-        mmdrop(work->mm);
+        mmput(work->mm);
         kmem_cache_free(async_pf_cache, work);
         return 0;
 }
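
The async_pf hunks switch the reference taken on the faulting task's mm from mm_count (released with mmdrop()) to mm_users (released with mmput()). mm_count only keeps the mm_struct itself allocated; it does not stop exit_mmap() from tearing down the address space, and the async page fault worker needs the mappings intact when it later faults the page in on the guest's behalf. Below is a toy model of the distinction, not kernel code; in later kernels the take-reference helpers are spelled mmget()/mmgrab().

    #include <stdio.h>

    struct toy_mm {
            int mm_users;           /* address space users */
            int mm_count;           /* references to the struct itself */
            int pages_valid;        /* stands in for page tables / VMAs */
    };

    static void toy_mmput(struct toy_mm *mm)
    {
            if (--mm->mm_users == 0)
                    mm->pages_valid = 0;    /* exit_mmap(): address space gone */
    }

    static void worker_touch_memory(const struct toy_mm *mm)
    {
            printf("worker sees %s address space\n",
                   mm->pages_valid ? "a live" : "a torn-down");
    }

    int main(void)
    {
            /* With only an mm_count-style reference, the task's own exit
             * tears the mappings down before the worker runs. */
            struct toy_mm mm1 = { .mm_users = 1, .mm_count = 2, .pages_valid = 1 };
            toy_mmput(&mm1);                /* task exits */
            worker_touch_memory(&mm1);      /* too late */

            /* With an extra mm_users reference, as this patch takes: */
            struct toy_mm mm2 = { .mm_users = 2, .mm_count = 1, .pages_valid = 1 };
            toy_mmput(&mm2);                /* task exits */
            worker_touch_memory(&mm2);      /* still live */
            toy_mmput(&mm2);                /* worker's mmput() */
            return 0;
    }
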