habanalabs: Assign each CQ with its own work queue

We identified a possible race during job completion when using a single
multi-threaded work queue. To overcome this race, use a dedicated
single-threaded work queue per completion queue, which guarantees that
job completions are processed in order.
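
As an illustration of the pattern (a minimal sketch with hypothetical names,
not the driver code itself): a single-threaded workqueue runs its work items
one at a time in queueing order, so giving each completion queue its own
workqueue serializes that CQ's job completions while still letting different
CQs be serviced in parallel.

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/workqueue.h>

	/* Illustrative only: one single-threaded workqueue per completion
	 * queue, so each CQ's completions are handled strictly in queueing
	 * order.
	 */
	struct example_cq {
		struct workqueue_struct *wq;	/* serializes this CQ's completions */
		u32 idx;
	};

	static int example_cq_init(struct example_cq *cq, u32 idx)
	{
		char name[32];

		snprintf(name, sizeof(name), "example-cq-%u", idx);
		cq->idx = idx;
		cq->wq = create_singlethread_workqueue(name);
		return cq->wq ? 0 : -ENOMEM;
	}

	/* From the CQ interrupt handler one would then do, e.g.:
	 *	queue_work(cq->wq, &job->finish_work);
	 * and items queued on the same wq never run concurrently.
	 */

The actual change below follows this pattern by turning hdev->cq_wq into an
array and creating one such workqueue per CQ in device_early_init().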

Signed-off-by: Ofir Bitton <obitton@habana.ai>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
commit 5574cb2194 (parent c83c417193)
Ofir Bitton, 2020-07-05 13:35:51 +03:00; committed by Oded Gabbay
4 changed files with 40 additions and 12 deletions


@@ -487,10 +487,12 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
 void hl_cs_rollback_all(struct hl_device *hdev)
 {
+	int i;
 	struct hl_cs *cs, *tmp;
 
 	/* flush all completions */
-	flush_workqueue(hdev->cq_wq);
+	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+		flush_workqueue(hdev->cq_wq[i]);
 
 	/* Make sure we don't have leftovers in the H/W queues mirror list */
 	list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,


@@ -249,7 +249,8 @@ static void device_cdev_sysfs_del(struct hl_device *hdev)
  */
 static int device_early_init(struct hl_device *hdev)
 {
-	int rc;
+	int i, rc;
+	char workq_name[32];
 
 	switch (hdev->asic_type) {
	case ASIC_GOYA:
@@ -274,11 +275,24 @@ static int device_early_init(struct hl_device *hdev)
 	if (rc)
 		goto early_fini;
 
-	hdev->cq_wq = alloc_workqueue("hl-free-jobs", WQ_UNBOUND, 0);
-	if (hdev->cq_wq == NULL) {
-		dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
-		rc = -ENOMEM;
-		goto asid_fini;
+	if (hdev->asic_prop.completion_queues_count) {
+		hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
+					sizeof(*hdev->cq_wq),
+					GFP_ATOMIC);
+		if (!hdev->cq_wq) {
+			rc = -ENOMEM;
+			goto asid_fini;
+		}
+	}
+
+	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
+		snprintf(workq_name, 32, "hl-free-jobs-%u", i);
+		hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
+		if (hdev->cq_wq[i] == NULL) {
+			dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
+			rc = -ENOMEM;
+			goto free_cq_wq;
+		}
 	}
 
 	hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
@@ -321,7 +335,10 @@ static int device_early_init(struct hl_device *hdev)
 free_eq_wq:
 	destroy_workqueue(hdev->eq_wq);
 free_cq_wq:
-	destroy_workqueue(hdev->cq_wq);
+	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+		if (hdev->cq_wq[i])
+			destroy_workqueue(hdev->cq_wq[i]);
+	kfree(hdev->cq_wq);
 asid_fini:
 	hl_asid_fini(hdev);
 early_fini:
@@ -339,6 +356,8 @@ static int device_early_init(struct hl_device *hdev)
  */
 static void device_early_fini(struct hl_device *hdev)
 {
+	int i;
+
 	mutex_destroy(&hdev->mmu_cache_lock);
 	mutex_destroy(&hdev->debug_lock);
 	mutex_destroy(&hdev->send_cpu_message_lock);
@@ -351,7 +370,10 @@ static void device_early_fini(struct hl_device *hdev)
 	kfree(hdev->hl_chip_info);
 
 	destroy_workqueue(hdev->eq_wq);
-	destroy_workqueue(hdev->cq_wq);
+
+	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+		destroy_workqueue(hdev->cq_wq[i]);
+	kfree(hdev->cq_wq);
 
 	hl_asid_fini(hdev);
@@ -1181,6 +1203,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
 				"failed to initialize completion queue\n");
 			goto cq_fini;
 		}
+		hdev->completion_queue[i].cq_idx = i;
 	}
 
 	/*


@@ -479,6 +479,7 @@ struct hl_hw_queue {
  * @hdev: pointer to the device structure
  * @kernel_address: holds the queue's kernel virtual address
  * @bus_address: holds the queue's DMA address
+ * @cq_idx: completion queue index in array
  * @hw_queue_id: the id of the matching H/W queue
  * @ci: ci inside the queue
  * @pi: pi inside the queue
@@ -488,6 +489,7 @@ struct hl_cq {
 	struct hl_device	*hdev;
 	u64			kernel_address;
 	dma_addr_t		bus_address;
+	u32			cq_idx;
 	u32			hw_queue_id;
 	u32			ci;
 	u32			pi;
@@ -1396,7 +1398,8 @@ struct hl_device_idle_busy_ts {
  * @asic_name: ASIC specific name.
  * @asic_type: ASIC specific type.
  * @completion_queue: array of hl_cq.
- * @cq_wq: work queue of completion queues for executing work in process context
+ * @cq_wq: work queues of completion queues for executing work in process
+ *         context.
  * @eq_wq: work queue of event queue for executing work in process context.
 * @kernel_ctx: Kernel driver context structure.
 * @kernel_queues: array of hl_hw_queue.
@@ -1492,7 +1495,7 @@ struct hl_device {
 	char			asic_name[16];
 	enum hl_asic_type	asic_type;
 	struct hl_cq		*completion_queue;
-	struct workqueue_struct	*cq_wq;
+	struct workqueue_struct	**cq_wq;
 	struct workqueue_struct	*eq_wq;
 	struct hl_ctx		*kernel_ctx;
 	struct hl_hw_queue	*kernel_queues;


@@ -119,7 +119,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
 	if ((shadow_index_valid) && (!hdev->disabled)) {
 		job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
-		queue_work(hdev->cq_wq, &job->finish_work);
+		queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
 	}
 
 	atomic_inc(&queue->ci);