path: root/virt/kvm/irq_comm.c
author    Christian Borntraeger <borntraeger@de.ibm.com>    2014-01-16 07:44:20 -0500
committer Paolo Bonzini <pbonzini@redhat.com>    2014-05-05 10:29:11 -0400
commit    719d93cd5f5c5c8775b7a38192069e8e1d1ac46e (patch)
tree      e6d7703b4b69acf92db962fbe96d9f7c380484c8 /virt/kvm/irq_comm.c
parent    57b5981cd38cbca3554c5e663b2361d9adea70c2 (diff)
kvm/irqchip: Speed up KVM_SET_GSI_ROUTING
When starting lots of dataplane devices the bootup takes very long on Christian's s390 with irqfd patches. With larger setups he is even able to trigger some timeouts in some components. Turns out that the KVM_SET_GSI_ROUTING ioctl takes very long (strace claims up to 0.1 sec) when having multiple CPUs. This is caused by the synchronize_rcu and the HZ=100 of s390. By changing the code to use a private srcu we can speed things up. This patch reduces the boot time till mounting root from 8 to 2 seconds on my s390 guest with 100 disks.

Uses of hlist_for_each_entry_rcu, hlist_add_head_rcu, hlist_del_init_rcu are fine because they do not have lockdep checks (hlist_for_each_entry_rcu uses rcu_dereference_raw rather than rcu_dereference, and write-sides do not do rcu lockdep at all).

Note that we're hardly relying on the "sleepable" part of srcu. We just want SRCU's faster detection of grace periods.

Testing was done by Andrew Theurer using netperf tests STREAM, MAERTS and RR. The difference between results "before" and "after" the patch has mean -0.2% and standard deviation 0.6%. Using a paired t-test on the data points says that there is a 2.5% probability that the patch is the cause of the performance difference (rather than a random fluctuation).

(Restricting the t-test to RR, which is the most likely to be affected, changes the numbers to respectively -0.3% mean, 0.7% stdev, and 8% probability that the numbers actually say something about the patch. The probability increases mostly because there are fewer data points.)

Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com> # s390
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
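For context, the read-side pattern this patch switches to looks roughly like the sketch below. This is an illustrative, self-contained example rather than the KVM code itself: my_srcu, routing_table, routing and read_nr_entries are placeholder names standing in for kvm->irq_srcu, struct kvm_irq_routing_table and kvm->irq_routing.

#include <linux/srcu.h>
#include <linux/rcupdate.h>

/* Placeholder for an RCU/SRCU-published lookup table. */
struct routing_table {
        int nr_entries;
        /* ... */
};

static struct routing_table __rcu *routing;
DEFINE_STATIC_SRCU(my_srcu);    /* private SRCU domain, analogous to kvm->irq_srcu */

/*
 * Read side: unlike rcu_read_lock(), srcu_read_lock() returns an index
 * that must be passed back to srcu_read_unlock() for the same domain,
 * and dereferences go through srcu_dereference() against that domain.
 */
static int read_nr_entries(void)
{
        struct routing_table *rt;
        int idx, nr = 0;

        idx = srcu_read_lock(&my_srcu);
        rt = srcu_dereference(routing, &my_srcu);
        if (rt)
                nr = rt->nr_entries;
        srcu_read_unlock(&my_srcu, idx);
        return nr;
}

The hunks below make exactly this substitution in the readers of kvm->irq_routing in irq_comm.c.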
Diffstat (limited to 'virt/kvm/irq_comm.c')
-rw-r--r--    virt/kvm/irq_comm.c    17
1 file changed, 9 insertions, 8 deletions
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index e2e6b4473a96..ced4a542a031 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -163,6 +163,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
         struct kvm_kernel_irq_routing_entry *e;
         int ret = -EINVAL;
         struct kvm_irq_routing_table *irq_rt;
+        int idx;
 
         trace_kvm_set_irq(irq, level, irq_source_id);
 
@@ -174,8 +175,8 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
          * Since there's no easy way to do this, we only support injecting MSI
          * which is limited to 1:1 GSI mapping.
          */
-        rcu_read_lock();
-        irq_rt = rcu_dereference(kvm->irq_routing);
+        idx = srcu_read_lock(&kvm->irq_srcu);
+        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
         if (irq < irq_rt->nr_rt_entries)
                 hlist_for_each_entry(e, &irq_rt->map[irq], link) {
                         if (likely(e->type == KVM_IRQ_ROUTING_MSI))
@@ -184,7 +185,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
                                 ret = -EWOULDBLOCK;
                         break;
                 }
-        rcu_read_unlock();
+        srcu_read_unlock(&kvm->irq_srcu, idx);
         return ret;
 }
 
@@ -253,22 +254,22 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
         mutex_lock(&kvm->irq_lock);
         hlist_del_rcu(&kimn->link);
         mutex_unlock(&kvm->irq_lock);
-        synchronize_rcu();
+        synchronize_srcu(&kvm->irq_srcu);
 }
 
 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                              bool mask)
 {
         struct kvm_irq_mask_notifier *kimn;
-        int gsi;
+        int idx, gsi;
 
-        rcu_read_lock();
-        gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+        idx = srcu_read_lock(&kvm->irq_srcu);
+        gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
         if (gsi != -1)
                 hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
                         if (kimn->irq == gsi)
                                 kimn->func(kimn, mask);
-        rcu_read_unlock();
+        srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
 int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
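The hunks above convert the readers and one synchronize call in irq_comm.c. For completeness, here is a hedged sketch of how the update side of such a private-SRCU scheme typically pairs with those readers. It reuses the placeholder names from the earlier sketch (my_srcu, routing_table, routing, routing_lock, replace_routing_table) and is not the actual kvm_set_irq_routing() code.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

/* Same placeholders as in the earlier sketch. */
struct routing_table {
        int nr_entries;
        /* ... */
};

static struct routing_table __rcu *routing;
static DEFINE_MUTEX(routing_lock);
DEFINE_STATIC_SRCU(my_srcu);

/*
 * Update side: publish the new table, then wait only for readers of this
 * private SRCU domain before freeing the old one.  As the commit message
 * notes, it is SRCU's faster grace-period detection, not its sleepable
 * read side, that avoids the long synchronize_rcu() delays seen with
 * HZ=100 on s390.
 */
static void replace_routing_table(struct routing_table *new)
{
        struct routing_table *old;

        mutex_lock(&routing_lock);
        old = rcu_dereference_protected(routing, lockdep_is_held(&routing_lock));
        rcu_assign_pointer(routing, new);
        mutex_unlock(&routing_lock);

        synchronize_srcu(&my_srcu);     /* all SRCU readers of "routing" are done */
        kfree(old);
}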