irqbypass: Take ownership of producer/consumer token tracking

Move ownership of IRQ bypass token tracking into irqbypass.ko, and
explicitly require callers to pass an eventfd_ctx structure instead of a
completely opaque token.  Relying on producers and consumers to set the
token appropriately is error-prone, and hiding the fact that the token must
be an eventfd_ctx pointer (for all intents and purposes) unnecessarily
obfuscates the code and makes it more brittle.
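
As a caller-facing summary, the conversion amounts to dropping the manual
token bookkeeping and handing the eventfd_ctx to the registration call.  A
minimal, hypothetical producer-side sketch (struct my_dev, my_register_bypass,
and the field names are illustrative placeholders, not code from this series):

  #include <linux/device.h>
  #include <linux/eventfd.h>
  #include <linux/irqbypass.h>

  /* Placeholder per-device state, loosely modeled on vfio-pci's per-vector context. */
  struct my_dev {
          struct device *dev;
          struct irq_bypass_producer producer;    /* must be zero-initialized */
  };

  static int my_register_bypass(struct my_dev *mdev, struct eventfd_ctx *trigger, int irq)
  {
          int ret;

          mdev->producer.irq = irq;

          /*
           * Old API, for comparison: the caller stashed the eventfd in the
           * opaque token and had to clear it again on failure:
           *
           *      mdev->producer.token = trigger;
           *      ret = irq_bypass_register_producer(&mdev->producer);
           *      if (ret)
           *              mdev->producer.token = NULL;
           */

          /* New API: irqbypass.ko owns the eventfd tracking. */
          ret = irq_bypass_register_producer(&mdev->producer, trigger);
          if (ret)
                  dev_info(mdev->dev, "irq bypass producer (eventfd %p) registration fails: %d\n",
                           trigger, ret);

          return ret;
  }

Unregistration is unchanged from the caller's perspective:
irq_bypass_unregister_producer() now clears the eventfd itself, so drivers no
longer need to NULL out a token by hand.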

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Link: https://lore.kernel.org/r/20250516230734.2564775-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Sean Christopherson 2025-05-16 16:07:29 -07:00
parent 07fbc83c01
commit 2b521d86ee
6 changed files with 58 additions and 49 deletions

@@ -13669,8 +13669,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
 prod->irq, irqfd->gsi, 0);
 if (ret)
-printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
-" fails: %d\n", irqfd->consumer.token, ret);
+printk(KERN_INFO "irq bypass consumer (eventfd %p) unregistration"
+" fails: %d\n", irqfd->consumer.eventfd, ret);
 spin_unlock_irq(&kvm->irqfds.lock);

@@ -505,15 +505,12 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
 if (ret)
 goto out_put_eventfd_ctx;
-ctx->producer.token = trigger;
 ctx->producer.irq = irq;
-ret = irq_bypass_register_producer(&ctx->producer);
+ret = irq_bypass_register_producer(&ctx->producer, trigger);
 if (unlikely(ret)) {
 dev_info(&pdev->dev,
-"irq bypass producer (token %p) registration fails: %d\n",
-ctx->producer.token, ret);
-ctx->producer.token = NULL;
+"irq bypass producer (eventfd %p) registration fails: %d\n",
+trigger, ret);
 }
 ctx->trigger = trigger;

@@ -213,10 +213,10 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
 return;
 vq->call_ctx.producer.irq = irq;
-ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+ret = irq_bypass_register_producer(&vq->call_ctx.producer, vq->call_ctx.ctx);
 if (unlikely(ret))
-dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
-qid, vq->call_ctx.producer.token, ret);
+dev_info(&v->dev, "vq %u, irq bypass producer (eventfd %p) registration fails, ret = %d\n",
+qid, vq->call_ctx.ctx, ret);
 }
 static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
@@ -712,7 +712,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 if (ops->get_status(vdpa) &
 VIRTIO_CONFIG_S_DRIVER_OK)
 vhost_vdpa_unsetup_vq_irq(v, idx);
-vq->call_ctx.producer.token = NULL;
 }
 break;
 }
@@ -753,7 +752,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 cb.callback = vhost_vdpa_virtqueue_cb;
 cb.private = vq;
 cb.trigger = vq->call_ctx.ctx;
-vq->call_ctx.producer.token = vq->call_ctx.ctx;
 if (ops->get_status(vdpa) &
 VIRTIO_CONFIG_S_DRIVER_OK)
 vhost_vdpa_setup_vq_irq(v, idx);

@@ -10,6 +10,7 @@
 #include <linux/list.h>
+struct eventfd_ctx;
 struct irq_bypass_consumer;
 /*
@@ -18,20 +19,20 @@ struct irq_bypass_consumer;
 * The IRQ bypass manager is a simple set of lists and callbacks that allows
 * IRQ producers (ex. physical interrupt sources) to be matched to IRQ
 * consumers (ex. virtualization hardware that allows IRQ bypass or offload)
-* via a shared token (ex. eventfd_ctx). Producers and consumers register
-* independently. When a token match is found, the optional @stop callback
-* will be called for each participant. The pair will then be connected via
-* the @add_* callbacks, and finally the optional @start callback will allow
-* any final coordination. When either participant is unregistered, the
-* process is repeated using the @del_* callbacks in place of the @add_*
-* callbacks. Match tokens must be unique per producer/consumer, 1:N pairings
-* are not supported.
+* via a shared eventfd_ctx. Producers and consumers register independently.
+* When a producer and consumer are paired, i.e. an eventfd match is found, the
+* optional @stop callback will be called for each participant. The pair will
+* then be connected via the @add_* callbacks, and finally the optional @start
+* callback will allow any final coordination. When either participant is
+* unregistered, the process is repeated using the @del_* callbacks in place of
+* the @add_* callbacks. eventfds must be unique per producer/consumer, 1:N
+* pairings are not supported.
 */
 /**
 * struct irq_bypass_producer - IRQ bypass producer definition
 * @node: IRQ bypass manager private list management
-* @token: opaque token to match between producer and consumer (non-NULL)
+* @eventfd: eventfd context used to match producers and consumers
 * @irq: Linux IRQ number for the producer device
 * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
 * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
@@ -44,7 +45,7 @@ struct irq_bypass_consumer;
 */
 struct irq_bypass_producer {
 struct list_head node;
-void *token;
+struct eventfd_ctx *eventfd;
 int irq;
 int (*add_consumer)(struct irq_bypass_producer *,
 struct irq_bypass_consumer *);
@@ -57,7 +58,7 @@ struct irq_bypass_producer {
 /**
 * struct irq_bypass_consumer - IRQ bypass consumer definition
 * @node: IRQ bypass manager private list management
-* @token: opaque token to match between producer and consumer (non-NULL)
+* @eventfd: eventfd context used to match producers and consumers
 * @add_producer: Connect the IRQ consumer to an IRQ producer
 * @del_producer: Disconnect the IRQ consumer from an IRQ producer
 * @stop: Perform any quiesce operations necessary prior to add/del (optional)
@@ -70,7 +71,7 @@ struct irq_bypass_producer {
 */
 struct irq_bypass_consumer {
 struct list_head node;
-void *token;
+struct eventfd_ctx *eventfd;
 int (*add_producer)(struct irq_bypass_consumer *,
 struct irq_bypass_producer *);
 void (*del_producer)(struct irq_bypass_consumer *,
@@ -79,9 +80,11 @@ struct irq_bypass_consumer {
 void (*start)(struct irq_bypass_consumer *);
 };
-int irq_bypass_register_producer(struct irq_bypass_producer *);
-void irq_bypass_unregister_producer(struct irq_bypass_producer *);
-int irq_bypass_register_consumer(struct irq_bypass_consumer *);
-void irq_bypass_unregister_consumer(struct irq_bypass_consumer *);
+int irq_bypass_register_producer(struct irq_bypass_producer *producer,
+struct eventfd_ctx *eventfd);
+void irq_bypass_unregister_producer(struct irq_bypass_producer *producer);
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
+struct eventfd_ctx *eventfd);
+void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer);
 #endif /* IRQBYPASS_H */
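
To make the reworked contract above concrete, a minimal, hypothetical consumer
might look like the sketch below (names and callback bodies are placeholders;
only the signatures come from this header).  add_producer/del_producer are
mandatory, stop/start stay optional, and the eventfd is passed at registration
time instead of being stashed in the structure by the caller:

  #include <linux/eventfd.h>
  #include <linux/irqbypass.h>

  struct my_consumer {
          struct irq_bypass_consumer consumer;    /* must be zero-initialized */
  };

  static int my_add_producer(struct irq_bypass_consumer *cons,
                             struct irq_bypass_producer *prod)
  {
          /* Route prod->irq directly to the guest/virtual interrupt. */
          return 0;
  }

  static void my_del_producer(struct irq_bypass_consumer *cons,
                              struct irq_bypass_producer *prod)
  {
          /* Tear the bypass route back down. */
  }

  static int my_register(struct my_consumer *mc, struct eventfd_ctx *eventfd)
  {
          mc->consumer.add_producer = my_add_producer;
          mc->consumer.del_producer = my_del_producer;

          /*
           * consumer.eventfd is owned by irqbypass.ko; leave it NULL here and
           * let irq_bypass_register_consumer() record the eventfd on success.
           */
          return irq_bypass_register_consumer(&mc->consumer, eventfd);
  }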

@@ -426,15 +426,14 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 #if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
 if (kvm_arch_has_irq_bypass()) {
-irqfd->consumer.token = (void *)irqfd->eventfd;
 irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
 irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
 irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
 irqfd->consumer.start = kvm_arch_irq_bypass_start;
-ret = irq_bypass_register_consumer(&irqfd->consumer);
+ret = irq_bypass_register_consumer(&irqfd->consumer, irqfd->eventfd);
 if (ret)
-pr_info("irq bypass consumer (token %p) registration fails: %d\n",
-irqfd->consumer.token, ret);
+pr_info("irq bypass consumer (eventfd %p) registration fails: %d\n",
+irqfd->eventfd, ret);
 }
 #endif

@@ -77,30 +77,32 @@ static void __disconnect(struct irq_bypass_producer *prod,
 /**
 * irq_bypass_register_producer - register IRQ bypass producer
 * @producer: pointer to producer structure
+* @eventfd: pointer to the eventfd context associated with the producer
 *
 * Add the provided IRQ producer to the list of producers and connect
-* with any matching token found on the IRQ consumers list.
+* with any matching eventfd found on the IRQ consumers list.
 */
-int irq_bypass_register_producer(struct irq_bypass_producer *producer)
+int irq_bypass_register_producer(struct irq_bypass_producer *producer,
+struct eventfd_ctx *eventfd)
 {
 struct irq_bypass_producer *tmp;
 struct irq_bypass_consumer *consumer;
 int ret;
-if (!producer->token)
+if (WARN_ON_ONCE(producer->eventfd))
 return -EINVAL;
 mutex_lock(&lock);
 list_for_each_entry(tmp, &producers, node) {
-if (tmp->token == producer->token) {
+if (tmp->eventfd == eventfd) {
 ret = -EBUSY;
 goto out_err;
 }
 }
 list_for_each_entry(consumer, &consumers, node) {
-if (consumer->token == producer->token) {
+if (consumer->eventfd == eventfd) {
 ret = __connect(producer, consumer);
 if (ret)
 goto out_err;
@@ -108,6 +110,7 @@ int irq_bypass_register_producer(struct irq_bypass_producer *producer)
 }
 }
+producer->eventfd = eventfd;
 list_add(&producer->node, &producers);
 mutex_unlock(&lock);
@@ -131,26 +134,28 @@ void irq_bypass_unregister_producer(struct irq_bypass_producer *producer)
 struct irq_bypass_producer *tmp;
 struct irq_bypass_consumer *consumer;
-if (!producer->token)
+if (!producer->eventfd)
 return;
 mutex_lock(&lock);
 list_for_each_entry(tmp, &producers, node) {
-if (tmp->token != producer->token)
+if (tmp->eventfd != producer->eventfd)
 continue;
 list_for_each_entry(consumer, &consumers, node) {
-if (consumer->token == producer->token) {
+if (consumer->eventfd == producer->eventfd) {
 __disconnect(producer, consumer);
 break;
 }
 }
+producer->eventfd = NULL;
 list_del(&producer->node);
 break;
 }
+WARN_ON_ONCE(producer->eventfd);
 mutex_unlock(&lock);
 }
 EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer);
@@ -158,31 +163,35 @@ EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer);
 /**
 * irq_bypass_register_consumer - register IRQ bypass consumer
 * @consumer: pointer to consumer structure
+* @eventfd: pointer to the eventfd context associated with the consumer
 *
 * Add the provided IRQ consumer to the list of consumers and connect
-* with any matching token found on the IRQ producer list.
+* with any matching eventfd found on the IRQ producer list.
 */
-int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
+struct eventfd_ctx *eventfd)
 {
 struct irq_bypass_consumer *tmp;
 struct irq_bypass_producer *producer;
 int ret;
-if (!consumer->token ||
-!consumer->add_producer || !consumer->del_producer)
+if (WARN_ON_ONCE(consumer->eventfd))
 return -EINVAL;
+if (!consumer->add_producer || !consumer->del_producer)
+return -EINVAL;
 mutex_lock(&lock);
 list_for_each_entry(tmp, &consumers, node) {
-if (tmp->token == consumer->token || tmp == consumer) {
+if (tmp->eventfd == eventfd) {
 ret = -EBUSY;
 goto out_err;
 }
 }
 list_for_each_entry(producer, &producers, node) {
-if (producer->token == consumer->token) {
+if (producer->eventfd == eventfd) {
 ret = __connect(producer, consumer);
 if (ret)
 goto out_err;
@@ -190,6 +199,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
 }
 }
+consumer->eventfd = eventfd;
 list_add(&consumer->node, &consumers);
 mutex_unlock(&lock);
@@ -213,7 +223,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
 struct irq_bypass_consumer *tmp;
 struct irq_bypass_producer *producer;
-if (!consumer->token)
+if (!consumer->eventfd)
 return;
 mutex_lock(&lock);
@@ -223,16 +233,18 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
 continue;
 list_for_each_entry(producer, &producers, node) {
-if (producer->token == consumer->token) {
+if (producer->eventfd == consumer->eventfd) {
 __disconnect(producer, consumer);
 break;
 }
 }
+consumer->eventfd = NULL;
 list_del(&consumer->node);
 break;
 }
+WARN_ON_ONCE(consumer->eventfd);
 mutex_unlock(&lock);
 }
 EXPORT_SYMBOL_GPL(irq_bypass_unregister_consumer);