mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 02:30:34 +02:00)
	KVM: Remove deprecated create_singlethread_workqueue
The workqueue "irqfd_cleanup_wq" queues a single work item, &irqfd->shutdown, and hence doesn't require ordering. It is a host-wide workqueue for issuing deferred shutdown requests aggregated from all vm* instances, and it is not used on a memory-reclaim path. Hence, it has been converted to use system_wq. The work item is now flushed in kvm_irqfd_release().

The workqueue "wqueue" queues a single work item, &timer->expired, and hence doesn't require ordering either. It is likewise not used on a memory-reclaim path, so it has also been converted to use system_wq.

System workqueues have been able to handle a high level of concurrency for a long time now, so a single-threaded workqueue is not needed just to gain concurrency. Unlike a dedicated per-cpu workqueue created with create_singlethread_workqueue(), system_wq allows multiple work items to overlap executions even on the same CPU; however, a per-cpu workqueue doesn't have any CPU locality or global ordering guarantee unless the target CPU is explicitly specified, and thus the increase of local concurrency shouldn't make any difference.

Signed-off-by: Bhaktipriya Shridhar <bhaktipriya96@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
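For readers unfamiliar with the pattern, here is a minimal sketch of the conversion described above. The struct and function names (example_device, example_shutdown, example_trigger, example_release) are illustrative only and are not taken from this commit; the workqueue calls (INIT_WORK, schedule_work, flush_work) are the standard <linux/workqueue.h> API that the patch switches to.

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative only: an object with one deferred-shutdown work item. */
struct example_device {
	struct work_struct shutdown;	/* single work item, no ordering needed */
};

static void example_shutdown(struct work_struct *work)
{
	struct example_device *dev =
		container_of(work, struct example_device, shutdown);

	/* ... deferred cleanup for dev ... */
}

static void example_init(struct example_device *dev)
{
	INIT_WORK(&dev->shutdown, example_shutdown);
}

static void example_trigger(struct example_device *dev)
{
	/*
	 * Before the conversion: queue_work(my_private_wq, &dev->shutdown)
	 * on a queue made with create_singlethread_workqueue().
	 * After: hand the same work item to the shared system_wq.
	 */
	schedule_work(&dev->shutdown);
}

static void example_release(struct example_device *dev)
{
	/*
	 * Before: flush_workqueue(my_private_wq) waited for everything on
	 * the dedicated queue. After: wait only for this specific item.
	 */
	flush_work(&dev->shutdown);
}

Note the trade-off visible in the kvm_irqfd_deassign() and kvm_irqfd_release() hunks below: flush_work() waits only for the one named work item rather than for everything that was ever queued, so each shutdown work item has to be flushed individually.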
This commit is contained in:
parent f15a75eedc
commit 3706feacd0

3 changed files with 5 additions and 34 deletions
@@ -31,7 +31,6 @@
 #include "trace.h"
 
 static struct timecounter *timecounter;
-static struct workqueue_struct *wqueue;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
@@ -141,7 +140,7 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 		return HRTIMER_RESTART;
 	}
 
-	queue_work(wqueue, &timer->expired);
+	schedule_work(&timer->expired);
 	return HRTIMER_NORESTART;
 }
 
@@ -449,12 +448,6 @@ int kvm_timer_hyp_init(void)
 		goto out;
 	}
 
-	wqueue = create_singlethread_workqueue("kvm_arch_timer");
-	if (!wqueue) {
-		err = -ENOMEM;
-		goto out_free;
-	}
-
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
 
 	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
@@ -518,7 +511,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 	 * VCPUs have the enabled variable set, before entering the guest, if
 	 * the arch timers are enabled.
 	 */
-	if (timecounter && wqueue)
+	if (timecounter)
 		timer->enabled = 1;
 
 	return 0;
@@ -42,7 +42,6 @@
 
 #ifdef CONFIG_HAVE_KVM_IRQFD
 
-static struct workqueue_struct *irqfd_cleanup_wq;
 
 static void
 irqfd_inject(struct work_struct *work)
@@ -168,7 +167,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
 
 	list_del_init(&irqfd->list);
 
-	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
+	schedule_work(&irqfd->shutdown);
 }
 
 int __attribute__((weak)) kvm_arch_set_irq_inatomic(
@@ -555,7 +554,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 	 * so that we guarantee there will not be any more interrupts on this
 	 * gsi once this deassign function returns.
 	 */
-	flush_workqueue(irqfd_cleanup_wq);
+	flush_work(&irqfd->shutdown);
 
 	return 0;
 }
@@ -592,7 +591,7 @@ kvm_irqfd_release(struct kvm *kvm)
 	 * Block until we know all outstanding shutdown jobs have completed
 	 * since we do not take a kvm* reference.
 	 */
-	flush_workqueue(irqfd_cleanup_wq);
+	flush_work(&irqfd->shutdown);
 
 }
 
@@ -622,23 +621,8 @@ void kvm_irq_routing_update(struct kvm *kvm)
 	spin_unlock_irq(&kvm->irqfds.lock);
 }
 
-/*
- * create a host-wide workqueue for issuing deferred shutdown requests
- * aggregated from all vm* instances. We need our own isolated single-thread
- * queue to prevent deadlock against flushing the normal work-queue.
- */
-int kvm_irqfd_init(void)
-{
-	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
-	if (!irqfd_cleanup_wq)
-		return -ENOMEM;
-
-	return 0;
-}
-
 void kvm_irqfd_exit(void)
 {
-	destroy_workqueue(irqfd_cleanup_wq);
 }
 #endif
 
@@ -3807,12 +3807,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	 * kvm_arch_init makes sure there's at most one caller
 	 * for architectures that support multiple implementations,
 	 * like intel and amd on x86.
-	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
-	 * conflicts in case kvm is already setup for another implementation.
 	 */
-	r = kvm_irqfd_init();
-	if (r)
-		goto out_irqfd;
 
 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
 		r = -ENOMEM;
@@ -3894,7 +3889,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
 	kvm_irqfd_exit();
-out_irqfd:
 	kvm_arch_exit();
 out_fail:
 	return r;