author     Paul Mackerras <paulus@samba.org>      2014-06-30 06:51:11 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2014-08-05 08:26:20 -0400
commit     9957c86d659a4d5a2bed25ccbd3bfc9c3f25e658
tree       27bd3fc644da983d63e34bb1ed54c8dcdab5fe0d /virt
parent     8ba918d488caded2c4368b0b922eb905fe3bb101
KVM: Move all accesses to kvm::irq_routing into irqchip.c

Now that struct _irqfd does not keep a reference to storage pointed
to by the irq_routing field of struct kvm, we can move the statement
that updates it out from under the irqfds.lock and put it in
kvm_set_irq_routing() instead. That means we then have to take a
srcu_read_lock on kvm->irq_srcu around the irqfd_update call in
kvm_irqfd_assign(), since holding the kvm->irqfds.lock no longer
ensures that the routing can't change.

Combined with changing kvm_irq_map_gsi() and kvm_irq_map_chip_pin()
to take a struct kvm * argument instead of the pointer to the routing
table, this allows us to move all references to kvm->irq_routing
into irqchip.c. That in turn allows us to move the definition of the
kvm_irq_routing_table struct into irqchip.c as well.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Tested-by: Eric Auger <eric.auger@linaro.org>
Tested-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
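
For reference, a minimal sketch of the read-side pattern this patch establishes. The helper name resolve_gsi_example() is hypothetical and only for illustration; the locking mirrors kvm_set_irq() and kvm_set_irq_inatomic() in the diff below: readers hold kvm->irq_srcu, pass the struct kvm * down, and let irqchip.c do the srcu_dereference() of kvm->irq_routing.

/*
 * Illustrative only: resolve_gsi_example() is a hypothetical helper,
 * not part of this patch.  After this change a reader never touches
 * kvm->irq_routing directly; it takes kvm->irq_srcu for reading and
 * calls the irqchip.c accessors with the struct kvm * instead.
 */
static int resolve_gsi_example(struct kvm *kvm, u32 irq)
{
        struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
        int idx, n;

        idx = srcu_read_lock(&kvm->irq_srcu);
        n = kvm_irq_map_gsi(kvm, entries, irq);  /* irqchip.c dereferences the table */
        srcu_read_unlock(&kvm->irq_srcu, idx);

        /* n is the number of routing entries copied out for this GSI. */
        return n;
}

On the update side, kvm_set_irq_routing() now publishes the new table with rcu_assign_pointer() under kvm->irq_lock, calls kvm_irq_routing_update(), and then waits with synchronize_srcu_expedited(&kvm->irq_srcu), as shown in the final irqchip.c hunk below.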
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/eventfd.c  | 22
-rw-r--r--  virt/kvm/irq_comm.c |  6
-rw-r--r--  virt/kvm/irqchip.c  | 39
3 files changed, 36 insertions(+), 31 deletions(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 15fa9488b2d0..f0075ffb0c35 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -278,14 +278,13 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
 }
 
 /* Must be called under irqfds.lock */
-static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
-                         struct kvm_irq_routing_table *irq_rt)
+static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
 {
         struct kvm_kernel_irq_routing_entry *e;
         struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
         int i, n_entries;
 
-        n_entries = kvm_irq_map_gsi(entries, irq_rt, irqfd->gsi);
+        n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
 
         write_seqcount_begin(&irqfd->irq_entry_sc);
 
@@ -304,12 +303,12 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
 static int
 kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 {
-        struct kvm_irq_routing_table *irq_rt;
         struct _irqfd *irqfd, *tmp;
         struct fd f;
         struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
         int ret;
         unsigned int events;
+        int idx;
 
         irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
         if (!irqfd)
@@ -403,9 +402,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
                 goto fail;
         }
 
-        irq_rt = rcu_dereference_protected(kvm->irq_routing,
-                                           lockdep_is_held(&kvm->irqfds.lock));
-        irqfd_update(kvm, irqfd, irq_rt);
+        idx = srcu_read_lock(&kvm->irq_srcu);
+        irqfd_update(kvm, irqfd);
+        srcu_read_unlock(&kvm->irq_srcu, idx);
 
         list_add_tail(&irqfd->list, &kvm->irqfds.items);
 
@@ -539,20 +538,17 @@ kvm_irqfd_release(struct kvm *kvm)
 }
 
 /*
- * Change irq_routing and irqfd.
+ * Take note of a change in irq routing.
  * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
  */
-void kvm_irq_routing_update(struct kvm *kvm,
-                            struct kvm_irq_routing_table *irq_rt)
+void kvm_irq_routing_update(struct kvm *kvm)
 {
         struct _irqfd *irqfd;
 
         spin_lock_irq(&kvm->irqfds.lock);
 
-        rcu_assign_pointer(kvm->irq_routing, irq_rt);
-
         list_for_each_entry(irqfd, &kvm->irqfds.items, list)
-                irqfd_update(kvm, irqfd, irq_rt);
+                irqfd_update(kvm, irqfd);
 
         spin_unlock_irq(&kvm->irqfds.lock);
 }
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 175844593243..963b8995a9e8 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -163,7 +163,6 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
         struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
         struct kvm_kernel_irq_routing_entry *e;
         int ret = -EINVAL;
-        struct kvm_irq_routing_table *irq_rt;
         int idx;
 
         trace_kvm_set_irq(irq, level, irq_source_id);
@@ -177,8 +176,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
          * which is limited to 1:1 GSI mapping.
          */
         idx = srcu_read_lock(&kvm->irq_srcu);
-        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-        if (kvm_irq_map_gsi(entries, irq_rt, irq) > 0) {
+        if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
                 e = &entries[0];
                 if (likely(e->type == KVM_IRQ_ROUTING_MSI))
                         ret = kvm_set_msi_inatomic(e, kvm);
@@ -264,7 +262,7 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
         int idx, gsi;
 
         idx = srcu_read_lock(&kvm->irq_srcu);
-        gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
+        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
         if (gsi != -1)
                 hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
                         if (kimn->irq == gsi)
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index f4648dd94888..04faac50cef5 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -31,12 +31,26 @@
 #include <trace/events/kvm.h>
 #include "irq.h"
 
-int kvm_irq_map_gsi(struct kvm_kernel_irq_routing_entry *entries,
-                    struct kvm_irq_routing_table *irq_rt, int gsi)
+struct kvm_irq_routing_table {
+        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
+        struct kvm_kernel_irq_routing_entry *rt_entries;
+        u32 nr_rt_entries;
+        /*
+         * Array indexed by gsi. Each entry contains list of irq chips
+         * the gsi is connected to.
+         */
+        struct hlist_head map[0];
+};
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+                    struct kvm_kernel_irq_routing_entry *entries, int gsi)
 {
+        struct kvm_irq_routing_table *irq_rt;
         struct kvm_kernel_irq_routing_entry *e;
         int n = 0;
 
+        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
+                                        lockdep_is_held(&kvm->irq_lock));
         if (gsi < irq_rt->nr_rt_entries) {
                 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                         entries[n] = *e;
@@ -47,21 +61,21 @@ int kvm_irq_map_gsi(struct kvm_kernel_irq_routing_entry *entries,
         return n;
 }
 
-int kvm_irq_map_chip_pin(struct kvm_irq_routing_table *irq_rt,
-                         unsigned irqchip, unsigned pin)
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
+        struct kvm_irq_routing_table *irq_rt;
+
+        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
         return irq_rt->chip[irqchip][pin];
 }
 
 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
-        struct kvm_irq_routing_table *irq_rt;
         struct kvm_irq_ack_notifier *kian;
         int gsi, idx;
 
         idx = srcu_read_lock(&kvm->irq_srcu);
-        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-        gsi = kvm_irq_map_chip_pin(irq_rt, irqchip, pin);
+        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
         if (gsi != -1)
                 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                          link)
@@ -78,15 +92,13 @@ EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
 
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
-        struct kvm_irq_routing_table *irq_rt;
         struct kvm_irq_ack_notifier *kian;
         int gsi, idx;
 
         trace_kvm_ack_irq(irqchip, pin);
 
         idx = srcu_read_lock(&kvm->irq_srcu);
-        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-        gsi = kvm_irq_map_chip_pin(irq_rt, irqchip, pin);
+        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
         if (gsi != -1)
                 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                          link)
@@ -143,7 +155,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
 {
         struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS];
         int ret = -1, i, idx;
-        struct kvm_irq_routing_table *irq_rt;
 
         trace_kvm_set_irq(irq, level, irq_source_id);
 
@@ -152,8 +163,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
          * writes to the unused one.
          */
         idx = srcu_read_lock(&kvm->irq_srcu);
-        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
-        i = kvm_irq_map_gsi(irq_set, irq_rt, irq);
+        i = kvm_irq_map_gsi(kvm, irq_set, irq);
         srcu_read_unlock(&kvm->irq_srcu, idx);
 
         while(i--) {
@@ -250,7 +260,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
 
         mutex_lock(&kvm->irq_lock);
         old = kvm->irq_routing;
-        kvm_irq_routing_update(kvm, new);
+        rcu_assign_pointer(kvm->irq_routing, new);
+        kvm_irq_routing_update(kvm);
         mutex_unlock(&kvm->irq_lock);
 
         synchronize_srcu_expedited(&kvm->irq_srcu);