Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-19 05:08:41 +07:00)
drm/amdgpu: clean up amd sched wait_ts and wait_signal
Remove code not used at the moment.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
commit 1d7dd229f5
parent 7fc1195901
@ -90,12 +90,6 @@ static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
|
||||
ring = fence->ring;
|
||||
adev = ring->adev;
|
||||
|
||||
if (sched_job->ctx) {
|
||||
c_entity = &sched_job->ctx->rings[ring->idx].c_entity;
|
||||
atomic64_set(&c_entity->last_signaled_v_seq,
|
||||
sched_job->ibs[sched_job->num_ibs - 1].sequence);
|
||||
}
|
||||
|
||||
/* wake up users waiting for time stamp */
|
||||
wake_up_all(&c_entity->wait_queue);
|
||||
|
||||
|
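For context, what survives in amdgpu_sched_process_job() is the standard kernel wait-queue handshake: the completion path publishes progress in an atomic counter and then wakes every sleeper, each of which re-checks its own condition. Below is a minimal sketch of that producer side; the demo_* names are hypothetical stand-ins for the driver's amd_context_entity and are not part of this patch.

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/wait.h>

/* hypothetical illustration type, not the driver's amd_context_entity */
struct demo_entity {
	atomic64_t        last_emitted_v_seq; /* progress counter */
	wait_queue_head_t wait_emit;          /* waiters sleep here */
};

static void demo_entity_init(struct demo_entity *e)
{
	atomic64_set(&e->last_emitted_v_seq, 0);
	init_waitqueue_head(&e->wait_emit);
}

/* completion path: publish the new sequence, then wake every waiter */
static void demo_signal_emitted(struct demo_entity *e, u64 seq)
{
	atomic64_set(&e->last_emitted_v_seq, seq);
	wake_up_all(&e->wait_emit);
}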
@@ -208,7 +208,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 	entity->context_id = context_id;
 	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
 	atomic64_set(&entity->last_queued_v_seq, seq_ring);
-	atomic64_set(&entity->last_signaled_v_seq, seq_ring);
 
 	/* Add the entity to the run queue */
 	mutex_lock(&rq->lock);
@@ -317,20 +316,7 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 }
 
-/**
- * Check the virtual sequence number for specified context
- *
- * @seq The virtual sequence number to check
- * @c_entity The pointer to a valid amd_context_entity
- *
- * return 0 if signaled, -1 else.
- */
-int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq)
-{
-	return (seq <= atomic64_read(&c_entity->last_signaled_v_seq)) ? 0 : -1;
-}
-
 /**
- * Wait for a virtual sequence number to be signaled or timeout
+ * Wait for a virtual sequence number to be emitted.
  *
  * @c_entity The pointer to a valid context entity
  * @seq The virtual sequence number to wait
@@ -340,16 +326,13 @@ int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq)
  *
  * return =0 signaled , <0 failed
  */
-static int amd_sched_wait(struct amd_context_entity *c_entity,
-			  uint64_t seq,
-			  bool intr,
-			  long timeout,
-			  bool emit)
+int amd_sched_wait_emit(struct amd_context_entity *c_entity,
+			uint64_t seq,
+			bool intr,
+			long timeout)
 {
-	atomic64_t *v_seq = emit ? &c_entity->last_emitted_v_seq :
-		&c_entity->last_signaled_v_seq;
-	wait_queue_head_t *wait_queue = emit ? &c_entity->wait_emit :
-		&c_entity->wait_queue;
+	atomic64_t *v_seq = &c_entity->last_emitted_v_seq;
+	wait_queue_head_t *wait_queue = &c_entity->wait_emit;
 
 	if (intr && (timeout < 0)) {
 		wait_event_interruptible(
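The simplification above removes the emit/signal switch, so the wait helper boils down to sleeping on one wait queue until one atomic counter reaches the requested virtual sequence number. The sketch below shows one way such a helper can be structured with the stock wait_event_* macros; it reuses the hypothetical demo_entity from the earlier sketch (restated so the block stands alone) and is not the driver's actual amd_sched_wait_emit() body, whose tail is not shown in this hunk.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>

/* hypothetical illustration type (same shape as in the earlier sketch) */
struct demo_entity {
	atomic64_t        last_emitted_v_seq;
	wait_queue_head_t wait_emit;
};

/*
 * Sleep until `seq` has been emitted or the wait fails.
 * timeout < 0 means wait forever; otherwise timeout is in jiffies.
 * Returns 0 on success, -ERESTARTSYS if interrupted, -ETIME on timeout.
 */
static long demo_wait_emit(struct demo_entity *e, u64 seq,
			   bool intr, long timeout)
{
	long r;

	if (timeout < 0) {
		if (intr)
			return wait_event_interruptible(e->wait_emit,
				seq <= atomic64_read(&e->last_emitted_v_seq));
		wait_event(e->wait_emit,
			   seq <= atomic64_read(&e->last_emitted_v_seq));
		return 0;
	}

	if (intr)
		r = wait_event_interruptible_timeout(e->wait_emit,
			seq <= atomic64_read(&e->last_emitted_v_seq), timeout);
	else
		r = wait_event_timeout(e->wait_emit,
			seq <= atomic64_read(&e->last_emitted_v_seq), timeout);

	if (r < 0)		/* interrupted by a signal */
		return r;
	return r ? 0 : -ETIME;	/* no jiffies left => timed out */
}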
@@ -379,22 +362,6 @@ static int amd_sched_wait(struct amd_context_entity *c_entity,
 	return 0;
 }
 
-int amd_sched_wait_signal(struct amd_context_entity *c_entity,
-			  uint64_t seq,
-			  bool intr,
-			  long timeout)
-{
-	return amd_sched_wait(c_entity, seq, intr, timeout, false);
-}
-
-int amd_sched_wait_emit(struct amd_context_entity *c_entity,
-			uint64_t seq,
-			bool intr,
-			long timeout)
-{
-	return amd_sched_wait(c_entity, seq, intr, timeout, true);
-}
-
 static int amd_sched_main(void *param)
 {
 	int r;
@@ -74,7 +74,6 @@ struct amd_context_entity {
 	/* the virtual_seq is unique per context per ring */
 	atomic64_t last_queued_v_seq;
 	atomic64_t last_emitted_v_seq;
-	atomic64_t last_signaled_v_seq;
 	pid_t tgid;
 	uint32_t context_id;
 	/* the job_queue maintains the jobs submitted by clients */
@@ -134,10 +133,6 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 			  struct amd_context_entity *c_entity,
 			  void *job);
 
-int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq);
-
-int amd_sched_wait_signal(struct amd_context_entity *c_entity,
-			  uint64_t seq, bool intr, long timeout);
 int amd_sched_wait_emit(struct amd_context_entity *c_entity,
 			uint64_t seq,
 			bool intr,
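With amd_sched_check_ts() and amd_sched_wait_signal() gone, amd_sched_wait_emit() is the only wait entry point left in this header. A hypothetical caller might look like the sketch below; how the submitter learns the job's virtual sequence number (here read back from last_queued_v_seq) and the header name are assumptions for illustration, not taken from this patch.

#include <linux/atomic.h>
#include <linux/types.h>

#include "gpu_scheduler.h"	/* assumed name of the header declaring the prototypes above */

static int demo_submit_and_wait(struct amd_gpu_scheduler *sched,
				struct amd_context_entity *c_entity,
				void *job)
{
	uint64_t seq;
	int r;

	r = amd_sched_push_job(sched, c_entity, job);
	if (r)
		return r;

	/* assumption: the submitter assigned the next queued virtual seq to this job */
	seq = atomic64_read(&c_entity->last_queued_v_seq);

	/* interruptible, unbounded wait (timeout < 0) until the job is emitted */
	return amd_sched_wait_emit(c_entity, seq, true, -1);
}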