drm/virtio: notify virtqueues without holding spinlock

Split the virtqueue_kick() call into virtqueue_kick_prepare(), which
requires serialization, and virtqueue_notify(), which does not.  Move
the virtqueue_notify() call out of the critical section protected by the
queue lock.  This avoids triggering a vmexit while holding the lock and
thereby fixes rather bad spinlock contention.

Suggested-by: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190813082509.29324-3-kraxel@redhat.com
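
For reference, this is the pattern the diff applies at each call site.
A minimal sketch, assuming a hypothetical driver-private queue (struct
my_queue and my_queue_buffer() are illustrative names, not symbols from
this patch):

  #include <linux/gfp.h>
  #include <linux/scatterlist.h>
  #include <linux/spinlock.h>
  #include <linux/virtio.h>

  /* Hypothetical queue: a virtqueue guarded by a driver lock. */
  struct my_queue {
          struct virtqueue *vq;
          spinlock_t qlock;
  };

  static void my_queue_buffer(struct my_queue *q, struct scatterlist *sg)
  {
          bool notify;

          spin_lock(&q->qlock);
          if (virtqueue_add_outbuf(q->vq, sg, 1, sg, GFP_ATOMIC) < 0) {
                  spin_unlock(&q->qlock);
                  return;
          }
          /* Needs the lock: publishes the buffer to the ring. */
          notify = virtqueue_kick_prepare(q->vq);
          spin_unlock(&q->qlock);

          /* Needs no lock: the notification traps to the host
           * (a vmexit), so keep it outside the critical section.
           */
          if (notify)
                  virtqueue_notify(q->vq);
  }

virtqueue_kick() is just virtqueue_kick_prepare() followed, when that
returns true, by virtqueue_notify(), so the split preserves semantics;
the only change is that the expensive notification no longer runs under
the queue lock.
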
commit 744583ecc3 (parent 4100a7b834)
Author: Gerd Hoffmann <kraxel@redhat.com>
Date:   2019-08-13 10:25:09 +02:00

--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -252,7 +252,7 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 	wake_up(&vgdev->cursorq.ack_queue);
 }
 
-static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 						struct virtio_gpu_vbuffer *vbuf)
 		__releases(&vgdev->ctrlq.qlock)
 		__acquires(&vgdev->ctrlq.qlock)
@@ -260,10 +260,11 @@ static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 	struct virtqueue *vq = vgdev->ctrlq.vq;
 	struct scatterlist *sgs[3], vcmd, vout, vresp;
 	int outcnt = 0, incnt = 0;
+	bool notify = false;
 	int ret;
 
 	if (!vgdev->vqs_ready)
-		return;
+		return notify;
 
 	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
 	sgs[outcnt + incnt] = &vcmd;
@@ -292,16 +293,21 @@ static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 		trace_virtio_gpu_cmd_queue(vq,
 					   (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 
-		virtqueue_kick(vq);
+		notify = virtqueue_kick_prepare(vq);
 	}
+
+	return notify;
 }
 
 static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					 struct virtio_gpu_vbuffer *vbuf)
 {
+	bool notify;
 	spin_lock(&vgdev->ctrlq.qlock);
-	virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 	spin_unlock(&vgdev->ctrlq.qlock);
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
@@ -310,6 +316,7 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 						struct virtio_gpu_fence *fence)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
+	bool notify;
 
 again:
 	spin_lock(&vgdev->ctrlq.qlock);
@@ -330,8 +337,10 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 
 	if (fence)
 		virtio_gpu_fence_emit(vgdev, hdr, fence);
-	virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 	spin_unlock(&vgdev->ctrlq.qlock);
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@ -339,6 +348,7 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 {
 	struct virtqueue *vq = vgdev->cursorq.vq;
 	struct scatterlist *sgs[1], ccmd;
+	bool notify;
 	int ret;
 	int outcnt;
 
@@ -361,10 +371,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 
 		trace_virtio_gpu_cmd_queue(vq,
 					   (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 
-		virtqueue_kick(vq);
+		notify = virtqueue_kick_prepare(vq);
 	}
 	spin_unlock(&vgdev->cursorq.qlock);
+
+	if (notify)
+		virtqueue_notify(vq);
 }
 
 /* just create gem objects for userspace and long lived objects,