vhost_vdpa: implement IRQ offloading in vhost_vdpa

This patch introduces a set of functions that set up, tear down, and
update IRQ offloading by registering, unregistering, and re-registering
the irq_bypass_producer, respectively.

With these functions, this commit can set up/tear down IRQ offloading
when DRIVER_OK is set/cleared, and update IRQ offloading through
SET_VRING_CALL.

Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
Suggested-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20200731065533.4144-5-lingshan.zhu@intel.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Zhu Lingshan 2020-07-31 14:55:31 +08:00 committed by Michael S. Tsirkin
parent 7164675ab5
commit 2cf1ba9a4d
2 changed files with 63 additions and 1 deletion
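
For context, below is a minimal sketch (not part of this patch) of how a parent vDPA driver might implement the ->get_vq_irq() callback that vhost_vdpa_setup_vq_irq() consumes; the callback itself is added to vdpa_config_ops by a companion patch in this series, and the driver structure, field names, and queue count here are assumptions made for illustration only.

#include <linux/vdpa.h>

#define MY_VDPA_MAX_QUEUES 2	/* hypothetical queue count */

struct my_vdpa_dev {
	struct vdpa_device vdpa;
	/* Linux IRQ number for each vq, or a negative value if none */
	int vq_irq[MY_VDPA_MAX_QUEUES];
};

static int my_vdpa_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
	struct my_vdpa_dev *dev = container_of(vdev, struct my_vdpa_dev, vdpa);

	/*
	 * A negative return tells vhost_vdpa_setup_vq_irq() that no
	 * dedicated IRQ is available, so it skips bypass registration.
	 */
	return dev->vq_irq[idx];
}

static const struct vdpa_config_ops my_vdpa_ops = {
	/* ... the other mandatory vdpa_config_ops callbacks ... */
	.get_vq_irq = my_vdpa_get_vq_irq,
};

With ->get_vq_irq() wired up like this, vhost_vdpa can hand the vq's interrupt to the IRQ bypass manager whenever a call eventfd is present and DRIVER_OK is set.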

drivers/vhost/Kconfig

@@ -65,6 +65,7 @@ config VHOST_VDPA
 	tristate "Vhost driver for vDPA-based backend"
 	depends on EVENTFD
 	select VHOST
+	select IRQ_BYPASS_MANAGER
 	depends on VDPA
 	help
 	  This kernel module can be loaded in host kernel to accelerate

drivers/vhost/vdpa.c

@@ -82,6 +82,39 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
 	return IRQ_HANDLED;
 }
 
+static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+	struct vhost_virtqueue *vq = &v->vqs[qid];
+	const struct vdpa_config_ops *ops = v->vdpa->config;
+	struct vdpa_device *vdpa = v->vdpa;
+	int ret, irq;
+
+	if (!ops->get_vq_irq)
+		return;
+
+	irq = ops->get_vq_irq(vdpa, qid);
+	spin_lock(&vq->call_ctx.ctx_lock);
+	irq_bypass_unregister_producer(&vq->call_ctx.producer);
+	if (!vq->call_ctx.ctx || irq < 0) {
+		spin_unlock(&vq->call_ctx.ctx_lock);
+		return;
+	}
+
+	vq->call_ctx.producer.token = vq->call_ctx.ctx;
+	vq->call_ctx.producer.irq = irq;
+	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+	spin_unlock(&vq->call_ctx.ctx_lock);
+}
+
+static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+	struct vhost_virtqueue *vq = &v->vqs[qid];
+
+	spin_lock(&vq->call_ctx.ctx_lock);
+	irq_bypass_unregister_producer(&vq->call_ctx.producer);
+	spin_unlock(&vq->call_ctx.ctx_lock);
+}
+
 static void vhost_vdpa_reset(struct vhost_vdpa *v)
 {
 	struct vdpa_device *vdpa = v->vdpa;
@@ -121,11 +154,15 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 {
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
-	u8 status;
+	u8 status, status_old;
+	int nvqs = v->nvqs;
+	u16 i;
 
 	if (copy_from_user(&status, statusp, sizeof(status)))
 		return -EFAULT;
 
+	status_old = ops->get_status(vdpa);
+
 	/*
 	 * Userspace shouldn't remove status bits unless reset the
 	 * status to 0.
@@ -135,6 +172,15 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 
 	ops->set_status(vdpa, status);
 
+	/* vq irq is not expected to be changed once DRIVER_OK is set */
+	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
+		for (i = 0; i < nvqs; i++)
+			vhost_vdpa_setup_vq_irq(v, i);
+
+	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
+		for (i = 0; i < nvqs; i++)
+			vhost_vdpa_unsetup_vq_irq(v, i);
+
 	return 0;
 }
 
@@ -293,6 +339,7 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
 
 	return 0;
 }
+
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 				   void __user *argp)
 {
@@ -351,6 +398,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 			cb.private = NULL;
 		}
 		ops->set_vq_cb(vdpa, idx, &cb);
+		vhost_vdpa_setup_vq_irq(v, idx);
 		break;
 
 	case VHOST_SET_VRING_NUM:
@@ -726,6 +774,18 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
 	return r;
 }
 
+static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
+{
+	struct vhost_virtqueue *vq;
+	int i;
+
+	for (i = 0; i < v->nvqs; i++) {
+		vq = &v->vqs[i];
+		if (vq->call_ctx.producer.irq)
+			irq_bypass_unregister_producer(&vq->call_ctx.producer);
+	}
+}
+
 static int vhost_vdpa_release(struct inode *inode, struct file *filep)
 {
 	struct vhost_vdpa *v = filep->private_data;
@@ -738,6 +798,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
 	vhost_vdpa_iotlb_free(v);
 	vhost_vdpa_free_domain(v);
 	vhost_vdpa_config_put(v);
+	vhost_vdpa_clean_irq(v);
 	vhost_dev_cleanup(&v->vdev);
 	kfree(v->vdev.vqs);
 	mutex_unlock(&d->mutex);
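
For completeness, a rough userspace sketch (not part of this patch) of the ioctl sequence that drives the new code paths; the device node name, the single queue, and the omitted vring/feature setup are simplifications for illustration.

#include <fcntl.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <linux/virtio_config.h>

int main(void)
{
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);	/* example node name */
	int callfd = eventfd(0, EFD_CLOEXEC);
	struct vhost_vring_file call = { .index = 0, .fd = callfd };
	__u8 status;

	if (fd < 0 || callfd < 0)
		return 1;

	ioctl(fd, VHOST_SET_OWNER);

	/* SET_VRING_CALL: vhost_vdpa re-registers the bypass producer
	 * with the new eventfd token (a no-op until DRIVER_OK is set). */
	ioctl(fd, VHOST_SET_VRING_CALL, &call);

	/* DRIVER_OK: vhost_vdpa_set_status() walks all vqs and calls
	 * vhost_vdpa_setup_vq_irq(), enabling IRQ offloading. */
	status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER |
		 VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_DRIVER_OK;
	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);

	/* Resetting the status to 0 clears DRIVER_OK, so
	 * vhost_vdpa_set_status() unregisters the producers again. */
	status = 0;
	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
	return 0;
}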