Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/async_pf.c   4
-rw-r--r--  virt/kvm/eventfd.c   68
-rw-r--r--  virt/kvm/irq_comm.c  17
-rw-r--r--  virt/kvm/irqchip.c   31
-rw-r--r--  virt/kvm/kvm_main.c  25
5 files changed, 95 insertions(+), 50 deletions(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 06e6401d6ef4..d6a3d0993d88 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -80,12 +80,10 @@ static void async_pf_execute(struct work_struct *work)
 
 	might_sleep();
 
-	use_mm(mm);
 	down_read(&mm->mmap_sem);
-	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
+	get_user_pages(NULL, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
 	kvm_async_page_present_sync(vcpu, apf);
-	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
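
Note on the async_pf.c hunk above: the task argument of get_user_pages() is only used for fault accounting, so the worker can fault the page in against the target mm directly with tsk == NULL, and no longer needs to adopt the guest's address space via use_mm()/unuse_mm(). A minimal sketch of the resulting flow, under that assumption:

	/* sketch: fault one user page on behalf of a foreign mm;
	 * no use_mm()/unuse_mm() pair is required around this */
	down_read(&mm->mmap_sem);
	get_user_pages(NULL, mm, addr, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	kvm_async_page_present_sync(vcpu, apf);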
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 29c2a04e036e..20c3af7692c5 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -31,6 +31,7 @@
 #include <linux/list.h>
 #include <linux/eventfd.h>
 #include <linux/kernel.h>
+#include <linux/srcu.h>
 #include <linux/slab.h>
 
 #include "iodev.h"
@@ -118,19 +119,22 @@ static void
 irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
 {
 	struct _irqfd_resampler *resampler;
+	struct kvm *kvm;
 	struct _irqfd *irqfd;
+	int idx;
 
 	resampler = container_of(kian, struct _irqfd_resampler, notifier);
+	kvm = resampler->kvm;
 
-	kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
+	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
 		    resampler->notifier.gsi, 0, false);
 
-	rcu_read_lock();
+	idx = srcu_read_lock(&kvm->irq_srcu);
 
 	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
 		eventfd_signal(irqfd->resamplefd, 1);
 
-	rcu_read_unlock();
+	srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
 static void
@@ -142,7 +146,7 @@ irqfd_resampler_shutdown(struct _irqfd *irqfd)
 	mutex_lock(&kvm->irqfds.resampler_lock);
 
 	list_del_rcu(&irqfd->resampler_link);
-	synchronize_rcu();
+	synchronize_srcu(&kvm->irq_srcu);
 
 	if (list_empty(&resampler->list)) {
 		list_del(&resampler->link);
@@ -221,17 +225,18 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	unsigned long flags = (unsigned long)key;
 	struct kvm_kernel_irq_routing_entry *irq;
 	struct kvm *kvm = irqfd->kvm;
+	int idx;
 
 	if (flags & POLLIN) {
-		rcu_read_lock();
-		irq = rcu_dereference(irqfd->irq_entry);
+		idx = srcu_read_lock(&kvm->irq_srcu);
+		irq = srcu_dereference(irqfd->irq_entry, &kvm->irq_srcu);
 		/* An event has been signaled, inject an interrupt */
 		if (irq)
 			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
 				    false);
 		else
 			schedule_work(&irqfd->inject);
-		rcu_read_unlock();
+		srcu_read_unlock(&kvm->irq_srcu, idx);
 	}
 
 	if (flags & POLLHUP) {
@@ -363,7 +368,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 	}
 
 	list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
-	synchronize_rcu();
+	synchronize_srcu(&kvm->irq_srcu);
 
 	mutex_unlock(&kvm->irqfds.resampler_lock);
 	}
@@ -465,7 +470,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 	 * another thread calls kvm_irq_routing_update before
 	 * we flush workqueue below (we synchronize with
 	 * kvm_irq_routing_update using irqfds.lock).
-	 * It is paired with synchronize_rcu done by caller
+	 * It is paired with synchronize_srcu done by caller
 	 * of that function.
 	 */
 	rcu_assign_pointer(irqfd->irq_entry, NULL);
@@ -524,7 +529,7 @@ kvm_irqfd_release(struct kvm *kvm)
 
 /*
  * Change irq_routing and irqfd.
- * Caller must invoke synchronize_rcu afterwards.
+ * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
  */
 void kvm_irq_routing_update(struct kvm *kvm,
 			    struct kvm_irq_routing_table *irq_rt)
@@ -600,7 +605,15 @@ ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
 {
 	u64 _val;
 
-	if (!(addr == p->addr && len == p->length))
+	if (addr != p->addr)
+		/* address must be precise for a hit */
+		return false;
+
+	if (!p->length)
+		/* length = 0 means only look at the address, so always a hit */
+		return true;
+
+	if (len != p->length)
 		/* address-range must be precise for a hit */
 		return false;
 
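
The hunk above introduces zero-length ("wildcard length") ioeventfds: when p->length is 0, only the address is compared, so a guest access of any size hits. A hypothetical userspace registration could look like the following (vm_fd, evt_fd and mmio_addr are illustrative names, not from the patch; the len == 0 validity checks appear in a later hunk):

	struct kvm_ioeventfd io = {
		.addr  = mmio_addr,	/* guest-physical MMIO address */
		.len   = 0,		/* 0 = match on address alone */
		.fd    = evt_fd,	/* eventfd to signal on a hit */
		.flags = 0,		/* DATAMATCH/PIO are invalid with len == 0 */
	};
	ioctl(vm_fd, KVM_IOEVENTFD, &io);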
@@ -671,9 +684,11 @@ ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
 
 	list_for_each_entry(_p, &kvm->ioeventfds, list)
 		if (_p->bus_idx == p->bus_idx &&
-		    _p->addr == p->addr && _p->length == p->length &&
-		    (_p->wildcard || p->wildcard ||
-		     _p->datamatch == p->datamatch))
+		    _p->addr == p->addr &&
+		    (!_p->length || !p->length ||
+		     (_p->length == p->length &&
+		      (_p->wildcard || p->wildcard ||
+		       _p->datamatch == p->datamatch))))
 			return true;
 
 	return false;
@@ -697,8 +712,9 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 	int ret;
 
 	bus_idx = ioeventfd_bus_from_flags(args->flags);
-	/* must be natural-word sized */
+	/* must be natural-word sized, or 0 to ignore length */
 	switch (args->len) {
+	case 0:
 	case 1:
 	case 2:
 	case 4:
@@ -716,6 +732,12 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
 		return -EINVAL;
 
+	/* ioeventfd with no length can't be combined with DATAMATCH */
+	if (!args->len &&
+	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
+			   KVM_IOEVENTFD_FLAG_DATAMATCH))
+		return -EINVAL;
+
 	eventfd = eventfd_ctx_fdget(args->fd);
 	if (IS_ERR(eventfd))
 		return PTR_ERR(eventfd);
@@ -753,6 +775,16 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 	if (ret < 0)
 		goto unlock_fail;
 
+	/* When length is ignored, MMIO is also put on a separate bus, for
+	 * faster lookups.
+	 */
+	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
+		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
+					      p->addr, 0, &p->dev);
+		if (ret < 0)
+			goto register_fail;
+	}
+
 	kvm->buses[bus_idx]->ioeventfd_count++;
 	list_add_tail(&p->list, &kvm->ioeventfds);
 
@@ -760,6 +792,8 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
 	return 0;
 
+register_fail:
+	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
 unlock_fail:
 	mutex_unlock(&kvm->slots_lock);
 
@@ -799,6 +833,10 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 			continue;
 
 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
+		if (!p->length) {
+			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
+						  &p->dev);
+		}
 		kvm->buses[bus_idx]->ioeventfd_count--;
 		ioeventfd_release(p);
 		ret = 0;
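
The KVM_FAST_MMIO_BUS registration added above lets a zero-length MMIO ioeventfd be found by address alone, without decoding the faulting instruction. A plausible consumer, sketched from the VMX EPT-misconfig exit path of this era (treat the exact call site as an assumption, not part of this diff):

	/* sketch: on an EPT misconfig exit, try the fast MMIO bus first;
	 * a zero-length write is enough to match a wildcard ioeventfd */
	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
	if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
		skip_emulated_instruction(vcpu);	/* handled, no emulation */
		return 1;
	}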
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index e2e6b4473a96..ced4a542a031 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -163,6 +163,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 	struct kvm_kernel_irq_routing_entry *e;
 	int ret = -EINVAL;
 	struct kvm_irq_routing_table *irq_rt;
+	int idx;
 
 	trace_kvm_set_irq(irq, level, irq_source_id);
 
@@ -174,8 +175,8 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 	 * Since there's no easy way to do this, we only support injecting MSI
 	 * which is limited to 1:1 GSI mapping.
 	 */
-	rcu_read_lock();
-	irq_rt = rcu_dereference(kvm->irq_routing);
+	idx = srcu_read_lock(&kvm->irq_srcu);
+	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
 	if (irq < irq_rt->nr_rt_entries)
 		hlist_for_each_entry(e, &irq_rt->map[irq], link) {
 			if (likely(e->type == KVM_IRQ_ROUTING_MSI))
@@ -184,7 +185,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 				ret = -EWOULDBLOCK;
 			break;
 		}
-	rcu_read_unlock();
+	srcu_read_unlock(&kvm->irq_srcu, idx);
 	return ret;
 }
 
@@ -253,22 +254,22 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 	mutex_lock(&kvm->irq_lock);
 	hlist_del_rcu(&kimn->link);
 	mutex_unlock(&kvm->irq_lock);
-	synchronize_rcu();
+	synchronize_srcu(&kvm->irq_srcu);
 }
 
 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
 			     bool mask)
 {
 	struct kvm_irq_mask_notifier *kimn;
-	int gsi;
+	int idx, gsi;
 
-	rcu_read_lock();
-	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+	idx = srcu_read_lock(&kvm->irq_srcu);
+	gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
 	if (gsi != -1)
 		hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
 			if (kimn->irq == gsi)
 				kimn->func(kimn, mask);
-	rcu_read_unlock();
+	srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
 int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
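
All of the rcu_read_lock()/rcu_dereference() conversions in this series follow one read-side pattern against the new kvm->irq_srcu domain, condensed below (a sketch; the surrounding function bodies vary):

	/* srcu_read_lock() returns an index that must be passed back to
	 * srcu_read_unlock(); unlike plain RCU, SRCU readers may sleep. */
	int idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	/* ... walk the routing table ... */
	srcu_read_unlock(&kvm->irq_srcu, idx);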
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index 20dc9e4a8f6c..b43c275775cd 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -26,6 +26,7 @@
 
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
+#include <linux/srcu.h>
 #include <linux/export.h>
 #include <trace/events/kvm.h>
 #include "irq.h"
@@ -33,19 +34,19 @@
 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
 	struct kvm_irq_ack_notifier *kian;
-	int gsi;
+	int gsi, idx;
 
-	rcu_read_lock();
-	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+	idx = srcu_read_lock(&kvm->irq_srcu);
+	gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
 	if (gsi != -1)
 		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
 					 link)
 			if (kian->gsi == gsi) {
-				rcu_read_unlock();
+				srcu_read_unlock(&kvm->irq_srcu, idx);
 				return true;
 			}
 
-	rcu_read_unlock();
+	srcu_read_unlock(&kvm->irq_srcu, idx);
 
 	return false;
 }
@@ -54,18 +55,18 @@ EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
 	struct kvm_irq_ack_notifier *kian;
-	int gsi;
+	int gsi, idx;
 
 	trace_kvm_ack_irq(irqchip, pin);
 
-	rcu_read_lock();
-	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+	idx = srcu_read_lock(&kvm->irq_srcu);
+	gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
 	if (gsi != -1)
 		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
 					 link)
 			if (kian->gsi == gsi)
 				kian->irq_acked(kian);
-	rcu_read_unlock();
+	srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
@@ -85,7 +86,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 	mutex_lock(&kvm->irq_lock);
 	hlist_del_init_rcu(&kian->link);
 	mutex_unlock(&kvm->irq_lock);
-	synchronize_rcu();
+	synchronize_srcu(&kvm->irq_srcu);
 #ifdef __KVM_HAVE_IOAPIC
 	kvm_vcpu_request_scan_ioapic(kvm);
 #endif
@@ -115,7 +116,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
 		bool line_status)
 {
 	struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
-	int ret = -1, i = 0;
+	int ret = -1, i = 0, idx;
 	struct kvm_irq_routing_table *irq_rt;
 
 	trace_kvm_set_irq(irq, level, irq_source_id);
@@ -124,12 +125,12 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
 	 * IOAPIC. So set the bit in both. The guest will ignore
 	 * writes to the unused one.
 	 */
-	rcu_read_lock();
-	irq_rt = rcu_dereference(kvm->irq_routing);
+	idx = srcu_read_lock(&kvm->irq_srcu);
+	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
 	if (irq < irq_rt->nr_rt_entries)
 		hlist_for_each_entry(e, &irq_rt->map[irq], link)
 			irq_set[i++] = *e;
-	rcu_read_unlock();
+	srcu_read_unlock(&kvm->irq_srcu, idx);
 
 	while(i--) {
 		int r;
@@ -226,7 +227,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
 	kvm_irq_routing_update(kvm, new);
 	mutex_unlock(&kvm->irq_lock);
 
-	synchronize_rcu();
+	synchronize_srcu_expedited(&kvm->irq_srcu);
 
 	new = old;
 	r = 0;
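
kvm_set_irq_routing() above is the matching updater side: publish the new table under irq_lock, then wait out all irq_srcu readers before the old table can be freed. The expedited variant is used because userspace may update routing frequently, and only irq_srcu readers need to be waited for, not every RCU reader in the system. Sketched under those assumptions:

	mutex_lock(&kvm->irq_lock);
	old = kvm->irq_routing;
	kvm_irq_routing_update(kvm, new);	/* rcu_assign_pointer() inside */
	mutex_unlock(&kvm->irq_lock);

	synchronize_srcu_expedited(&kvm->irq_srcu);
	/* no reader can still see 'old'; it is now safe to free */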
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 86d1c457458d..4b6c01b477f9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -186,9 +186,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
+	long dirty_count = kvm->tlbs_dirty;
+
+	smp_mb();
 	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
-	kvm->tlbs_dirty = false;
+	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 
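
The kvm_flush_remote_tlbs() hunk above treats tlbs_dirty as a counter rather than a flag to close a race: if another CPU dirties TLB state between the sample and the flush, the cmpxchg fails and the dirty indication survives for the next flush, whereas a plain 'kvm->tlbs_dirty = false' would silently discard it. The resulting function, with that reasoning as comments:

	void kvm_flush_remote_tlbs(struct kvm *kvm)
	{
		long dirty_count = kvm->tlbs_dirty;

		smp_mb();	/* read tlbs_dirty before requesting the flush */
		if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
			++kvm->stat.remote_tlb_flush;
		/* clear only if nothing was dirtied since the sample */
		cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
	}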
@@ -454,11 +457,11 @@ static struct kvm *kvm_create_vm(unsigned long type)
 
 	r = kvm_arch_init_vm(kvm, type);
 	if (r)
-		goto out_err_nodisable;
+		goto out_err_no_disable;
 
 	r = hardware_enable_all();
 	if (r)
-		goto out_err_nodisable;
+		goto out_err_no_disable;
 
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
@@ -470,10 +473,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	r = -ENOMEM;
 	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
 	if (!kvm->memslots)
-		goto out_err_nosrcu;
+		goto out_err_no_srcu;
 	kvm_init_memslots_id(kvm);
 	if (init_srcu_struct(&kvm->srcu))
-		goto out_err_nosrcu;
+		goto out_err_no_srcu;
+	if (init_srcu_struct(&kvm->irq_srcu))
+		goto out_err_no_irq_srcu;
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
 					GFP_KERNEL);
@@ -502,10 +507,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	return kvm;
 
 out_err:
+	cleanup_srcu_struct(&kvm->irq_srcu);
+out_err_no_irq_srcu:
 	cleanup_srcu_struct(&kvm->srcu);
-out_err_nosrcu:
+out_err_no_srcu:
 	hardware_disable_all();
-out_err_nodisable:
+out_err_no_disable:
 	for (i = 0; i < KVM_NR_BUSES; i++)
 		kfree(kvm->buses[i]);
 	kfree(kvm->memslots);
@@ -601,6 +608,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
 	kvm_free_physmem(kvm);
+	cleanup_srcu_struct(&kvm->irq_srcu);
 	cleanup_srcu_struct(&kvm->srcu);
 	kvm_arch_free_vm(kvm);
 	hardware_disable_all();
@@ -637,14 +645,12 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
  */
 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 {
-#ifndef CONFIG_S390
 	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
 
 	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
 	if (!memslot->dirty_bitmap)
 		return -ENOMEM;
 
-#endif /* !CONFIG_S390 */
 	return 0;
 }
 
@@ -2922,6 +2928,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
 
 	return -EOPNOTSUPP;
 }
+EXPORT_SYMBOL_GPL(kvm_io_bus_write);
 
 /* kvm_io_bus_read - called under kvm->slots_lock */
 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,