mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-17 12:17:11 +07:00
drm/amdgpu: fix amdgpu_vm_handle_moved as well v2
There is no guarantee that the last BO_VA actually needed an update. In addition to that, all command submissions must wait for moved BOs to be cleared, not just the first one. v2: Don't overwrite any newer fence. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Chunming Zhou <david1.zhou@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
4a00f21db8
commit
4e55eb3879
@ -814,7 +814,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
|
||||
|
||||
}
|
||||
|
||||
r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync);
|
||||
r = amdgpu_vm_handle_moved(adev, vm);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -1743,7 +1743,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
||||
dma_addr_t *pages_addr = NULL;
|
||||
struct ttm_mem_reg *mem;
|
||||
struct drm_mm_node *nodes;
|
||||
struct dma_fence *exclusive;
|
||||
struct dma_fence *exclusive, **last_update;
|
||||
uint64_t flags;
|
||||
int r;
|
||||
|
||||
@ -1769,6 +1769,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
||||
else
|
||||
flags = 0x0;
|
||||
|
||||
if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
|
||||
last_update = &vm->last_update;
|
||||
else
|
||||
last_update = &bo_va->last_pt_update;
|
||||
|
||||
if (!clear && bo_va->base.moved) {
|
||||
bo_va->base.moved = false;
|
||||
list_splice_init(&bo_va->valids, &bo_va->invalids);
|
||||
@ -1780,7 +1785,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
||||
list_for_each_entry(mapping, &bo_va->invalids, list) {
|
||||
r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
|
||||
mapping, flags, nodes,
|
||||
&bo_va->last_pt_update);
|
||||
last_update);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
@ -1803,12 +1808,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
||||
trace_amdgpu_vm_bo_mapping(mapping);
|
||||
}
|
||||
|
||||
if (bo_va->base.bo &&
|
||||
bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) {
|
||||
dma_fence_put(vm->last_update);
|
||||
vm->last_update = dma_fence_get(bo_va->last_pt_update);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2006,15 +2005,15 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
|
||||
* PTs have to be reserved!
|
||||
*/
|
||||
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct amdgpu_sync *sync)
|
||||
struct amdgpu_vm *vm)
|
||||
{
|
||||
struct amdgpu_bo_va *bo_va = NULL;
|
||||
bool clear;
|
||||
int r = 0;
|
||||
|
||||
spin_lock(&vm->status_lock);
|
||||
while (!list_empty(&vm->moved)) {
|
||||
struct amdgpu_bo_va *bo_va;
|
||||
|
||||
bo_va = list_first_entry(&vm->moved,
|
||||
struct amdgpu_bo_va, base.vm_status);
|
||||
spin_unlock(&vm->status_lock);
|
||||
@ -2030,9 +2029,6 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
|
||||
}
|
||||
spin_unlock(&vm->status_lock);
|
||||
|
||||
if (bo_va)
|
||||
r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -250,8 +250,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct dma_fence **fence);
|
||||
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
struct amdgpu_sync *sync);
|
||||
struct amdgpu_vm *vm);
|
||||
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo_va *bo_va,
|
||||
bool clear);
|
||||
|
Loading…
Reference in New Issue
Block a user