vhost scsi: fix cmd completion race

We might not do the final se_cmd put from vhost_scsi_complete_cmd_work.
When the last put happens a little later, we can hit a race where
vhost_scsi_complete_cmd_work does vhost_signal, the guest runs and sends
more IO, and vhost_scsi_handle_vq runs but does not find any free cmds.

This patch has us delay completing the cmd until the last LIO core ref
is dropped. We then know that once we signal to the guest that the cmd
is completed, any new command it queues will find a free cmd.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Reviewed-by: Maurizio Lombardi <mlombard@redhat.com>
Link: https://lore.kernel.org/r/1604986403-4931-4-git-send-email-michael.christie@oracle.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 47a3565e8b (parent 25b98b64e2)
Author: Mike Christie, 2020-11-09 23:33:21 -06:00
Committer: Michael S. Tsirkin
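
To make the ordering above concrete, here is a minimal userspace sketch of the problem and the fix. It is not vhost or LIO code: the pool, the names (get_slot, put_slot, guest_submit) and the pool size are invented for illustration. The point is only that if the guest is signalled before the completed command's slot is recycled, the next request it submits can find no free cmd, while signalling after recycling cannot fail that way.

/* Minimal userspace illustration of the ordering fixed by this patch.
 * All names here (slot_pool, guest_submit, ...) are invented; this is
 * not vhost or LIO code.
 */
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 1              /* one in-flight command, worst case */

static bool slot_busy[POOL_SIZE];

static int get_slot(void)        /* like vhost_scsi_handle_vq grabbing a free cmd */
{
	for (int i = 0; i < POOL_SIZE; i++) {
		if (!slot_busy[i]) {
			slot_busy[i] = true;
			return i;
		}
	}
	return -1;               /* no free cmds: the failure in the race */
}

static void put_slot(int i)      /* like vhost_scsi_release_cmd_res recycling the cmd */
{
	slot_busy[i] = false;
}

static void guest_submit(const char *when)
{
	/* The guest reacts to the completion interrupt with more IO. */
	printf("%s: new request %s\n", when,
	       get_slot() >= 0 ? "got a cmd" : "found NO free cmd");
}

int main(void)
{
	int slot;

	/* Buggy order: signal the guest while the old cmd is still held. */
	slot = get_slot();
	guest_submit("signal before recycle");   /* may find no free cmd */
	put_slot(slot);

	/* Fixed order: recycle first, then signal. */
	slot = get_slot();
	put_slot(slot);
	guest_submit("signal after recycle");    /* always finds a cmd */
	return 0;
}

With the pool shrunk to a single entry, the buggy ordering reproduces the race on the very first completion, which is the scenario the patch closes.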

@@ -322,7 +322,7 @@ static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
         return 1;
 }
 
-static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 {
         struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
                                 struct vhost_scsi_cmd, tvc_se_cmd);
@@ -344,6 +344,16 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
         vhost_scsi_put_inflight(inflight);
 }
 
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+{
+        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+                                struct vhost_scsi_cmd, tvc_se_cmd);
+        struct vhost_scsi *vs = cmd->tvc_vhost;
+
+        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
+        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+}
+
 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 {
         return 0;
@@ -366,28 +376,15 @@ static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
         return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
-{
-        struct vhost_scsi *vs = cmd->tvc_vhost;
-
-        llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
-        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
-}
-
 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 {
-        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
-                                struct vhost_scsi_cmd, tvc_se_cmd);
-
-        vhost_scsi_complete_cmd(cmd);
+        transport_generic_free_cmd(se_cmd, 0);
         return 0;
 }
 
 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 {
-        struct vhost_scsi_cmd *cmd = container_of(se_cmd,
-                                struct vhost_scsi_cmd, tvc_se_cmd);
-
-        vhost_scsi_complete_cmd(cmd);
+        transport_generic_free_cmd(se_cmd, 0);
         return 0;
 }
@@ -433,15 +430,6 @@ vhost_scsi_allocate_evt(struct vhost_scsi *vs,
         return evt;
 }
 
-static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
-{
-        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
-
-        /* TODO locking against target/backend threads? */
-        transport_generic_free_cmd(se_cmd, 0);
-}
-
 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 {
         return target_put_sess_cmd(se_cmd);
@@ -560,7 +548,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                 } else
                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
 
-                vhost_scsi_free_cmd(cmd);
+                vhost_scsi_release_cmd_res(se_cmd);
         }
 
         vq = -1;
@@ -1091,7 +1079,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                                                  &prot_iter, exp_data_len,
                                                  &data_iter))) {
                                 vq_err(vq, "Failed to map iov to sgl\n");
-                                vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
+                                vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
                                 goto err;
                         }
                 }
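
For readers unfamiliar with the callback flow the diff relies on: queue_data_in/queue_status now only drop their reference via transport_generic_free_cmd(), and the completion work is queued from vhost_scsi_release_cmd(), which the target core invokes once the last reference is gone; the work handler then returns the cmd's resources via vhost_scsi_release_cmd_res() before the guest is signalled. The snippet below is a rough, userspace-only sketch of that "complete on the final put" pattern; cmd_get, cmd_put and queue_completion are stand-ins, not kernel or LIO APIs.

/* Rough sketch of "defer completion to the last reference drop".
 * cmd_get/cmd_put/queue_completion are stand-ins, not kernel or LIO APIs.
 */
#include <stdatomic.h>
#include <stdio.h>

struct cmd {
	atomic_int refcnt;
	void (*release)(struct cmd *);   /* plays the role of .release_cmd */
};

static void cmd_get(struct cmd *c)
{
	atomic_fetch_add(&c->refcnt, 1);
}

static void cmd_put(struct cmd *c)
{
	/* Only the final put triggers completion, mirroring the patch:
	 * the guest is signalled after every other holder is done.
	 */
	if (atomic_fetch_sub(&c->refcnt, 1) == 1)
		c->release(c);
}

static void queue_completion(struct cmd *c)
{
	/* In the driver this would llist_add() the cmd and queue the vhost
	 * work; the work then frees resources and signals the guest.
	 */
	printf("last ref dropped: queueing completion for %p\n", (void *)c);
}

int main(void)
{
	struct cmd c = { .release = queue_completion };

	atomic_init(&c.refcnt, 1);       /* initial owner */
	cmd_get(&c);                     /* e.g. backend still touching the command */
	cmd_put(&c);                     /* queue_status-style put: not the last one */
	cmd_put(&c);                     /* final put: completion is queued only now */
	return 0;
}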