KVM: Remove deprecated create_singlethread_workqueue
author    Bhaktipriya Shridhar <bhaktipriya96@gmail.com>
          Tue, 30 Aug 2016 17:59:51 +0000 (23:29 +0530)
committer Paolo Bonzini <pbonzini@redhat.com>
          Wed, 7 Sep 2016 17:34:28 +0000 (19:34 +0200)
The workqueue "irqfd_cleanup_wq" queues a single work item
&irqfd->shutdown and hence doesn't require ordering. It is a host-wide
workqueue for issuing deferred shutdown requests aggregated from all
vm* instances. It is not being used on a memory reclaim path.
Hence, it has been converted to use system_wq.
The work item has been flushed in kvm_irqfd_release().
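
For context, a minimal sketch of the difference between the two flush
primitives, using a hypothetical workqueue and work item (names are
illustrative, not code from this patch):

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* hypothetical dedicated queue */
static struct work_struct example_work;		/* hypothetical work item */

static void example_quiesce(void)
{
	/*
	 * flush_workqueue() waits for every work item queued on
	 * example_wq so far to finish executing.
	 */
	flush_workqueue(example_wq);

	/*
	 * flush_work() waits only for the last queueing of one specific
	 * work item, which is sufficient when the caller only cares
	 * about its own deferred shutdown having completed.
	 */
	flush_work(&example_work);
}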

The workqueue "wqueue" queues a single work item &timer->expired
and hence doesn't require ordering. Also, it is not being used on
a memory reclaim path. Hence, it has been converted to use system_wq.

System workqueues have been able to handle high levels of concurrency
for a long time now, so a single-threaded workqueue isn't required
just to gain concurrency. Unlike a dedicated per-cpu workqueue
created with create_singlethread_workqueue(), system_wq allows multiple
work items to overlap execution even on the same CPU; however, a
per-cpu workqueue doesn't provide any CPU locality or global ordering
guarantee unless the target CPU is explicitly specified, and thus the
increase in local concurrency shouldn't make any difference.
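
For reference, the general shape of such a conversion, sketched around a
hypothetical struct foo (illustrative only, not code from this patch):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo {
	struct work_struct work;
};

static void foo_work_fn(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, work);

	pr_debug("deferred work for foo at %p\n", f);
}

static void foo_init(struct foo *f)
{
	INIT_WORK(&f->work, foo_work_fn);
}

static void foo_kick(struct foo *f)
{
	/*
	 * Before: queue_work(wq, &f->work) on a queue created with
	 * create_singlethread_workqueue("foo").
	 * After: hand the item to the shared system workqueue;
	 * schedule_work() is shorthand for queue_work(system_wq, ...).
	 */
	schedule_work(&f->work);
}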

Signed-off-by: Bhaktipriya Shridhar <bhaktipriya96@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
virt/kvm/arm/arch_timer.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 77e6ccf149011b2f3b43ba01aa1454fd08889dc8..4309b60ebf171606467e8b4615c8830bf76d6a52 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -31,7 +31,6 @@
 #include "trace.h"
 
 static struct timecounter *timecounter;
-static struct workqueue_struct *wqueue;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
@@ -141,7 +140,7 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
                return HRTIMER_RESTART;
        }
 
-       queue_work(wqueue, &timer->expired);
+       schedule_work(&timer->expired);
        return HRTIMER_NORESTART;
 }
 
@@ -449,12 +448,6 @@ int kvm_timer_hyp_init(void)
                goto out;
        }
 
-       wqueue = create_singlethread_workqueue("kvm_arch_timer");
-       if (!wqueue) {
-               err = -ENOMEM;
-               goto out_free;
-       }
-
        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
 
        cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
@@ -518,7 +511,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
         * VCPUs have the enabled variable set, before entering the guest, if
         * the arch timers are enabled.
         */
-       if (timecounter && wqueue)
+       if (timecounter)
                timer->enabled = 1;
 
        return 0;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index e469b60124718e60740e24857d15ae7b8a503552..f397e9b20370a2fb547b04fe555802846b2e9aef 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -42,7 +42,6 @@
 
 #ifdef CONFIG_HAVE_KVM_IRQFD
 
-static struct workqueue_struct *irqfd_cleanup_wq;
 
 static void
 irqfd_inject(struct work_struct *work)
@@ -168,7 +167,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
 
        list_del_init(&irqfd->list);
 
-       queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
+       schedule_work(&irqfd->shutdown);
 }
 
 int __attribute__((weak)) kvm_arch_set_irq_inatomic(
@@ -555,7 +554,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
         * so that we guarantee there will not be any more interrupts on this
         * gsi once this deassign function returns.
         */
-       flush_workqueue(irqfd_cleanup_wq);
+       flush_work(&irqfd->shutdown);
 
        return 0;
 }
@@ -592,7 +591,7 @@ kvm_irqfd_release(struct kvm *kvm)
         * Block until we know all outstanding shutdown jobs have completed
         * since we do not take a kvm* reference.
         */
-       flush_workqueue(irqfd_cleanup_wq);
+       flush_work(&irqfd->shutdown);
 
 }
 
@@ -622,23 +621,8 @@ void kvm_irq_routing_update(struct kvm *kvm)
        spin_unlock_irq(&kvm->irqfds.lock);
 }
 
-/*
- * create a host-wide workqueue for issuing deferred shutdown requests
- * aggregated from all vm* instances. We need our own isolated single-thread
- * queue to prevent deadlock against flushing the normal work-queue.
- */
-int kvm_irqfd_init(void)
-{
-       irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
-       if (!irqfd_cleanup_wq)
-               return -ENOMEM;
-
-       return 0;
-}
-
 void kvm_irqfd_exit(void)
 {
-       destroy_workqueue(irqfd_cleanup_wq);
 }
 #endif
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 195078225aa5d0c3b3214fd40e0b5f441c5d7518..b3fa12ce116687b1b7ae8e468d5c71637e26ca3e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3807,12 +3807,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
         * kvm_arch_init makes sure there's at most one caller
         * for architectures that support multiple implementations,
         * like intel and amd on x86.
-        * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
-        * conflicts in case kvm is already setup for another implementation.
         */
-       r = kvm_irqfd_init();
-       if (r)
-               goto out_irqfd;
 
        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
                r = -ENOMEM;
@@ -3894,7 +3889,6 @@ out_free_0a:
        free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
        kvm_irqfd_exit();
-out_irqfd:
        kvm_arch_exit();
 out_fail:
        return r;