drm/amdgpu: new implementation for fence_wait_any (v2)
The original method would sleep/schedule at a granularity of HZ/2 and relied on the sequence-number signaling method. The new implementation is based on the kernel fence interface, with no unnecessary scheduling at all.

v2: replace the logic of the original amdgpu_fence_wait_any

Signed-off-by: monk.liu <monk.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 2e536084f2
commit 332dfe907b
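For orientation before reading the diff: the new amdgpu_fence_wait_any() is built on the kernel fence callback API rather than on polling sequence numbers every HZ/2. The sketch below is an illustrative reduction of that pattern to a single fence, not code from this commit; the names wait_ctx, wake_cb and wait_one are made up for the example, while fence_add_callback(), fence_remove_callback(), set_current_state() and schedule_timeout() are the real (pre-4.10, pre-dma_fence rename) kernel interfaces the patch uses.

#include <linux/fence.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/* hypothetical per-waiter context, mirroring struct amdgpu_wait_cb */
struct wait_ctx {
        struct fence_cb base;
        struct task_struct *task;
};

static void wake_cb(struct fence *f, struct fence_cb *cb)
{
        struct wait_ctx *ctx = container_of(cb, struct wait_ctx, base);

        wake_up_process(ctx->task);     /* kick the sleeping waiter */
}

static signed long wait_one(struct fence *f, bool intr, signed long t)
{
        struct wait_ctx ctx = { .task = current };

        /* fence_add_callback() returns -ENOENT if f already signaled */
        if (fence_add_callback(f, &ctx.base, wake_cb))
                return t;

        while (t > 0) {
                /* set the task state *before* testing the flag, so a
                 * concurrent wake_up_process() can never be lost */
                set_current_state(intr ? TASK_INTERRUPTIBLE
                                       : TASK_UNINTERRUPTIBLE);
                if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
                        break;
                t = schedule_timeout(t);
                if (t > 0 && intr && signal_pending(current))
                        t = -ERESTARTSYS;
        }
        __set_current_state(TASK_RUNNING);
        fence_remove_callback(f, &ctx.base);

        return t;       /* remaining jiffies; 0 = timeout, <0 = error */
}

The key detail, also called out in a comment in the patch itself, is the ordering: the signaled bit is tested only after set_current_state(). If the callback fires in between and calls wake_up_process(), the task state is put back to TASK_RUNNING and schedule_timeout() returns promptly instead of sleeping out the whole timeout.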
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -440,9 +440,9 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
 int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-                          struct amdgpu_fence **fences,
-                          bool intr);
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+                                  struct amdgpu_fence **fences,
+                                  bool intr, long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
@@ -487,7 +487,7 @@ static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
 	return a->seq < b->seq;
 }
 
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, 
+int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
 			   void *owner, struct amdgpu_fence **fence);
 
 /*
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -630,49 +630,6 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
 	return 0;
 }
 
-/**
- * amdgpu_fence_wait_any - wait for a fence to signal on any ring
- *
- * @adev: amdgpu device pointer
- * @fences: amdgpu fence object(s)
- * @intr: use interruptable sleep
- *
- * Wait for any requested fence to signal (all asics). Fence
- * array is indexed by ring id. @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
- */
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-                          struct amdgpu_fence **fences,
-                          bool intr)
-{
-	uint64_t seq[AMDGPU_MAX_RINGS];
-	unsigned i, num_rings = 0;
-	long r;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		seq[i] = 0;
-
-		if (!fences[i]) {
-			continue;
-		}
-
-		seq[i] = fences[i]->seq;
-		++num_rings;
-	}
-
-	/* nothing to wait for ? */
-	if (num_rings == 0)
-		return -ENOENT;
-
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		return r;
-	}
-	return 0;
-}
-
 /**
  * amdgpu_fence_wait_next - wait for the next fence to signal
  *
@@ -1128,6 +1085,22 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
 	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
+{
+	int idx;
+	struct amdgpu_fence *fence;
+
+	idx = 0;
+	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+		fence = fences[idx];
+		if (fence) {
+			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+				return true;
+		}
+	}
+	return false;
+}
+
 struct amdgpu_wait_cb {
 	struct fence_cb base;
 	struct task_struct *task;
@@ -1182,6 +1155,62 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
 	return t;
 }
 
+/* wait until any fence in array signaled */
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+				struct amdgpu_fence **array, bool intr, signed long t)
+{
+	long idx = 0;
+	struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
+	struct amdgpu_fence *fence;
+
+	BUG_ON(!array);
+
+	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+		fence = array[idx];
+		if (fence) {
+			cb[idx].task = current;
+			if (fence_add_callback(&fence->base,
+					&cb[idx].base, amdgpu_fence_wait_cb))
+				return t; /* return if fence is already signaled */
+		}
+	}
+
+	while (t > 0) {
+		if (intr)
+			set_current_state(TASK_INTERRUPTIBLE);
+		else
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+		/*
+		 * amdgpu_test_signaled_any must be called after
+		 * set_current_state to prevent a race with wake_up_process
+		 */
+		if (amdgpu_test_signaled_any(array))
+			break;
+
+		if (adev->needs_reset) {
+			t = -EDEADLK;
+			break;
+		}
+
+		t = schedule_timeout(t);
+
+		if (t > 0 && intr && signal_pending(current))
+			t = -ERESTARTSYS;
+	}
+
+	__set_current_state(TASK_RUNNING);
+
+	idx = 0;
+	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+		fence = array[idx];
+		if (fence)
+			fence_remove_callback(&fence->base, &cb[idx].base);
+	}
+
+	return t;
+}
+
 const struct fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -350,7 +350,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 	} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
 	spin_unlock(&sa_manager->wq.lock);
-	r = amdgpu_fence_wait_any(adev, fences, false);
+	r = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT);
+	r = (r > 0) ? 0 : r;
 	spin_lock(&sa_manager->wq.lock);
 	/* if we have nothing to wait for block */
 	if (r == -ENOENT) {
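A note on the suballocator change above: the old amdgpu_fence_wait_any() returned 0 on success and a negative error code otherwise, while the new one follows the fence-wait convention of returning the remaining timeout in jiffies (positive on success, 0 on timeout, negative on error). The added line "r = (r > 0) ? 0 : r;" maps the new convention back to the 0-on-success value the surrounding amdgpu_sa_bo_new() logic expects; since the call passes MAX_SCHEDULE_TIMEOUT, the wait only ends once a fence signals or an error occurs, so a plain 0 (timeout) return is not expected here.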