drm/amdgpu: add amdgpu_job_submit_direct helper
Make sure that we properly initialize at least the sched member.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit ee913fd9e1
parent 3320b8d2ac
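For readers skimming the diff, a minimal before/after sketch of the caller-side pattern the new helper replaces (condensed from the hunks below; `ring`, `ib`, `job`, and `f` are the locals already used by the existing callers):

	/* before: each direct-submission path open-coded the sequence */
	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;
	amdgpu_job_free(job);

	/* after: one call, which also initializes job->base.sched from the ring */
	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;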
@@ -140,6 +140,21 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	return 0;
 }
 
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+			     struct dma_fence **fence)
+{
+	int r;
+
+	job->base.sched = &ring->sched;
+	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
+	job->fence = dma_fence_get(*fence);
+	if (r)
+		return r;
+
+	amdgpu_job_free(job);
+	return 0;
+}
+
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 					       struct drm_sched_entity *s_entity)
 {
@@ -33,6 +33,8 @@
 #define to_amdgpu_job(sched_job)		\
 		container_of((sched_job), struct amdgpu_job, base)
 
+struct amdgpu_fence;
+
 struct amdgpu_job {
 	struct drm_sched_job	base;
 	struct amdgpu_device	*adev;
@@ -68,4 +70,6 @@ void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 		      void *owner, struct dma_fence **f);
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+			     struct dma_fence **fence);
 #endif
@@ -2075,24 +2075,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
-	if (direct_submit) {
-		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
-				       NULL, fence);
-		job->fence = dma_fence_get(*fence);
-		if (r)
-			DRM_ERROR("Error scheduling IBs (%d)\n", r);
-		amdgpu_job_free(job);
-	} else {
+	if (direct_submit)
+		r = amdgpu_job_submit_direct(job, ring, fence);
+	else
 		r = amdgpu_job_submit(job, &adev->mman.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
-		if (r)
-			goto error_free;
-	}
+	if (r)
+		goto error_free;
 
 	return r;
 
 error_free:
 	amdgpu_job_free(job);
+	DRM_ERROR("Error scheduling IBs (%d)\n", r);
 	return r;
 }
 
@@ -1062,12 +1062,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		if (r < 0)
 			goto err_free;
 
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
+		r = amdgpu_job_submit_direct(job, ring, &f);
 		if (r)
 			goto err_free;
-
-		amdgpu_job_free(job);
 	} else {
 		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
 				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
@@ -469,12 +469,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -531,19 +529,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err;
-
-		amdgpu_job_free(job);
-	} else {
+	if (direct)
+		r = amdgpu_job_submit_direct(job, ring, &f);
+	else
 		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err;
-	}
+	if (r)
+		goto err;
 
 	if (fence)
 		*fence = dma_fence_get(f);
@@ -308,13 +308,10 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 	}
 	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err_free;
 
-	amdgpu_job_free(job);
-
 	amdgpu_bo_fence(bo, f, false);
 	amdgpu_bo_unreserve(bo);
 	amdgpu_bo_unref(&bo);
@@ -499,12 +496,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -553,12 +548,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -666,12 +659,10 @@ static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
 	}
 	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -248,12 +248,10 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -312,19 +310,13 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err;
-
-		amdgpu_job_free(job);
-	} else {
+	if (direct)
+		r = amdgpu_job_submit_direct(job, ring, &f);
+	else
 		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err;
-	}
+	if (r)
+		goto err;
 
 	if (fence)
 		*fence = dma_fence_get(f);
@@ -250,12 +250,10 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-	job->fence = dma_fence_get(f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
-	amdgpu_job_free(job);
 	if (fence)
 		*fence = dma_fence_get(f);
 	dma_fence_put(f);
@@ -313,19 +311,13 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err;
-
-		amdgpu_job_free(job);
-	} else {
+	if (direct)
+		r = amdgpu_job_submit_direct(job, ring, &f);
+	else
 		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err;
-	}
+	if (r)
+		goto err;
 
 	if (fence)
 		*fence = dma_fence_get(f);