aboutsummaryrefslogtreecommitdiffstats
path: root/virt
diff options
context:
space:
mode:
author:    Bhaktipriya Shridhar <bhaktipriya96@gmail.com>  2016-08-30 13:59:51 -0400
committer: Paolo Bonzini <pbonzini@redhat.com>  2016-09-07 13:34:28 -0400
commit:    3706feacd007c89f49bdb20c5c1dd17c8badfc43 (patch)
tree:      a719e04c4f1871e8271eb3d2091cef727a3288db /virt
parent:    f15a75eedc18e1fece8e01f1a6f7d135e61b0750 (diff)
KVM: Remove deprecated create_singlethread_workqueue
The workqueue "irqfd_cleanup_wq" queues a single work item &irqfd->shutdown and hence doesn't require ordering. It is a host-wide workqueue for issuing deferred shutdown requests aggregated from all vm* instances. It is not being used on a memory reclaim path. Hence, it has been converted to use system_wq. The work item has been flushed in kvm_irqfd_release().

The workqueue "wqueue" queues a single work item &timer->expired and hence doesn't require ordering. Also, it is not being used on a memory reclaim path. Hence, it has been converted to use system_wq.

System workqueues have been able to handle high level of concurrency for a long time now and hence it's not required to have a singlethreaded workqueue just to gain concurrency. Unlike a dedicated per-cpu workqueue created with create_singlethread_workqueue(), system_wq allows multiple work items to overlap executions even on the same CPU; however, a per-cpu workqueue doesn't have any CPU locality or global ordering guarantee unless the target CPU is explicitly specified and thus the increase of local concurrency shouldn't make any difference.

Signed-off-by: Bhaktipriya Shridhar <bhaktipriya96@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/arch_timer.c  11
-rw-r--r--  virt/kvm/eventfd.c         22
-rw-r--r--  virt/kvm/kvm_main.c         6
3 files changed, 5 insertions, 34 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 77e6ccf14901..4309b60ebf17 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -31,7 +31,6 @@
 #include "trace.h"
 
 static struct timecounter *timecounter;
-static struct workqueue_struct *wqueue;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
@@ -141,7 +140,7 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 		return HRTIMER_RESTART;
 	}
 
-	queue_work(wqueue, &timer->expired);
+	schedule_work(&timer->expired);
 	return HRTIMER_NORESTART;
 }
 
@@ -449,12 +448,6 @@ int kvm_timer_hyp_init(void)
 		goto out;
 	}
 
-	wqueue = create_singlethread_workqueue("kvm_arch_timer");
-	if (!wqueue) {
-		err = -ENOMEM;
-		goto out_free;
-	}
-
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
 
 	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
@@ -518,7 +511,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 	 * VCPUs have the enabled variable set, before entering the guest, if
 	 * the arch timers are enabled.
 	 */
-	if (timecounter && wqueue)
+	if (timecounter)
 		timer->enabled = 1;
 
 	return 0;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index e469b6012471..f397e9b20370 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -42,7 +42,6 @@
 
 #ifdef CONFIG_HAVE_KVM_IRQFD
 
-static struct workqueue_struct *irqfd_cleanup_wq;
 
 static void
 irqfd_inject(struct work_struct *work)
@@ -168,7 +167,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
 
 	list_del_init(&irqfd->list);
 
-	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
+	schedule_work(&irqfd->shutdown);
 }
 
 int __attribute__((weak)) kvm_arch_set_irq_inatomic(
@@ -555,7 +554,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
 	 */
-	flush_workqueue(irqfd_cleanup_wq);
+	flush_work(&irqfd->shutdown);
 
 	return 0;
 }
@@ -592,7 +591,7 @@ kvm_irqfd_release(struct kvm *kvm)
 	 * Block until we know all outstanding shutdown jobs have completed
 	 * since we do not take a kvm* reference.
 	 */
-	flush_workqueue(irqfd_cleanup_wq);
+	flush_work(&irqfd->shutdown);
 
 }
 
@@ -622,23 +621,8 @@ void kvm_irq_routing_update(struct kvm *kvm)
 	spin_unlock_irq(&kvm->irqfds.lock);
 }
 
-/*
- * create a host-wide workqueue for issuing deferred shutdown requests
- * aggregated from all vm* instances. We need our own isolated single-thread
- * queue to prevent deadlock against flushing the normal work-queue.
- */
-int kvm_irqfd_init(void)
-{
-	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
-	if (!irqfd_cleanup_wq)
-		return -ENOMEM;
-
-	return 0;
-}
-
 void kvm_irqfd_exit(void)
 {
-	destroy_workqueue(irqfd_cleanup_wq);
 }
 #endif
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 195078225aa5..b3fa12ce1166 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3807,12 +3807,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	 * kvm_arch_init makes sure there's at most one caller
 	 * for architectures that support multiple implementations,
 	 * like intel and amd on x86.
-	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
-	 * conflicts in case kvm is already setup for another implementation.
 	 */
-	r = kvm_irqfd_init();
-	if (r)
-		goto out_irqfd;
 
 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
 		r = -ENOMEM;
@@ -3894,7 +3889,6 @@ out_free_0a:
 	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
 	kvm_irqfd_exit();
-out_irqfd:
 	kvm_arch_exit();
out_fail:
 	return r;