drm/amdgpu: rework synchronization of VM updates v4

If a reservation object is provided, we only sync to it
and no longer to the root PD.

v2: update comment, cleanup amdgpu_bo_sync_wait_resv
v3: use correct reservation object while clearing
v4: fix typo in amdgpu_bo_sync_wait_resv

Signed-off-by: Christian König <christian.koenig@amd.com>
Tested-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 9f3cc18d19
parent 4939d973b6
Author: Christian König
AuthorDate: 2020-01-23 14:49:45 +01:00
Committer: Alex Deucher

7 changed files with 70 additions and 65 deletions
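
To make the new contract concrete: prepare() now receives a reservation
object and an amdgpu_sync_mode instead of an owner plus a single exclusive
fence, and a NULL reservation object means there is nothing to wait for.
The stand-alone sketch below models how the amdgpu_vm.c paths in this patch
pick those two parameters. It is an illustration only; every type and helper
in it (sync_mode, resv, pick_sync_mode, pick_resv) is a simplified stand-in,
not the driver's API.

/*
 * Stand-alone model (NOT driver code) of the reworked synchronization
 * decision. Only the mode names mirror the real amdgpu_sync_mode values
 * used in the diff below; everything else is a stand-in.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum sync_mode { SYNC_EXPLICIT, SYNC_EQ_OWNER };

struct resv { const char *name; };	/* stand-in for struct dma_resv */

/* Mirrors amdgpu_vm_bo_update_mapping(): implicitly sync to command
 * submissions in the same VM before unmapping (PTE not valid), but only
 * to explicitly flagged fences before mapping. */
static enum sync_mode pick_sync_mode(bool pte_valid)
{
	return pte_valid ? SYNC_EXPLICIT : SYNC_EQ_OWNER;
}

/* Mirrors amdgpu_vm_bo_update(): when clearing (or without a BO) the
 * root PD reservation object is used, otherwise the BO's own one. */
static struct resv *pick_resv(bool clear, struct resv *bo_resv,
			      struct resv *root_resv)
{
	return (clear || !bo_resv) ? root_resv : bo_resv;
}

int main(void)
{
	struct resv root = { "root PD resv" }, bo = { "BO resv" };

	printf("map:   %s, %s\n", pick_resv(false, &bo, &root)->name,
	       pick_sync_mode(true) == SYNC_EXPLICIT ? "EXPLICIT" : "EQ_OWNER");
	printf("clear: %s, %s\n", pick_resv(true, NULL, &root)->name,
	       pick_sync_mode(false) == SYNC_EXPLICIT ? "EXPLICIT" : "EQ_OWNER");
	return 0;
}

Built with any C compiler, this prints which reservation object and sync
mode each path would end up using.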

drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -1403,28 +1403,49 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 }
 
 /**
- * amdgpu_sync_wait_resv - Wait for BO reservation fences
+ * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
  *
- * @bo: buffer object
+ * @adev: amdgpu device pointer
+ * @resv: reservation object to sync to
+ * @sync_mode: synchronization mode
  * @owner: fence owner
  * @intr: Whether the wait is interruptible
  *
+ * Extract the fences from the reservation object and waits for them to finish.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+			     enum amdgpu_sync_mode sync_mode, void *owner,
+			     bool intr)
+{
+	struct amdgpu_sync sync;
+	int r;
+
+	amdgpu_sync_create(&sync);
+	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
+	r = amdgpu_sync_wait(&sync, intr);
+	amdgpu_sync_free(&sync);
+	return r;
+}
+
+/**
+ * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
+ * @bo: buffer object to wait for
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Wrapper to wait for fences in a BO.
  * Returns:
  * 0 on success, errno otherwise.
  */
 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_sync sync;
-	int r;
 
-	amdgpu_sync_create(&sync);
-	amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
-			 AMDGPU_SYNC_NE_OWNER, owner);
-	r = amdgpu_sync_wait(&sync, intr);
-	amdgpu_sync_free(&sync);
-	return r;
+	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
+					AMDGPU_SYNC_NE_OWNER, owner, intr);
 }
 
 /**

drivers/gpu/drm/amd/amdgpu/amdgpu_object.h

@@ -277,6 +277,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+			     enum amdgpu_sync_mode sync_mode, void *owner,
+			     bool intr);
 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 int amdgpu_bo_validate(struct amdgpu_bo *bo);

drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c

@@ -249,13 +249,6 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
 			continue;
 
-		/* VM updates only sync with moves but not with user
-		 * command submissions or KFD evictions fences
-		 */
-		if (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-		    owner == AMDGPU_FENCE_OWNER_VM)
-			continue;
-
 		/* Ignore fences depending on the sync mode */
 		switch (mode) {
 		case AMDGPU_SYNC_ALWAYS:
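
With that special case gone, whether amdgpu_sync_resv() waits on a given
fence is driven by the sync mode alone. Below is a minimal sketch of that
filtering, assuming the documented semantics of the amdgpu_sync_mode values;
should_sync() and the bare owner pointers are illustrative stand-ins, and the
real code additionally special-cases fences from other devices, undefined
owners and KFD eviction fences.

/* Simplified model (NOT driver code) of the mode switch that remains in
 * amdgpu_sync_resv() after this patch.
 */
#include <stdbool.h>
#include <stdio.h>

enum amdgpu_sync_mode {		/* names mirror the driver enum */
	AMDGPU_SYNC_ALWAYS,	/* sync to everything */
	AMDGPU_SYNC_NE_OWNER,	/* sync to fences of other owners */
	AMDGPU_SYNC_EQ_OWNER,	/* sync to fences of the same owner */
	AMDGPU_SYNC_EXPLICIT,	/* only explicitly requested fences */
};

/* Returns true if a fence with the given owner should be waited on. */
static bool should_sync(enum amdgpu_sync_mode mode,
			const void *fence_owner, const void *owner)
{
	switch (mode) {
	case AMDGPU_SYNC_ALWAYS:
		return true;
	case AMDGPU_SYNC_NE_OWNER:
		return fence_owner != owner;
	case AMDGPU_SYNC_EQ_OWNER:
		return fence_owner == owner;
	case AMDGPU_SYNC_EXPLICIT:
	default:
		return false;
	}
}

int main(void)
{
	int vm_a, vm_b;		/* two distinct fence owners */

	printf("EQ_OWNER, same VM:  %d\n",
	       should_sync(AMDGPU_SYNC_EQ_OWNER, &vm_a, &vm_a));
	printf("EXPLICIT, other VM: %d\n",
	       should_sync(AMDGPU_SYNC_EXPLICIT, &vm_b, &vm_a));
	return 0;
}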

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -797,7 +797,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	params.vm = vm;
 	params.direct = direct;
 
-	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
+	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 	if (r)
 		return r;
 
@@ -1293,7 +1293,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 	params.vm = vm;
 	params.direct = direct;
 
-	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 	if (r)
 		return r;
 
@@ -1554,7 +1554,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  * @direct: direct submission in a page fault
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
  * @flags: flags for the entries
@@ -1569,14 +1569,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 				       struct amdgpu_vm *vm, bool direct,
-				       struct dma_fence *exclusive,
+				       struct dma_resv *resv,
 				       uint64_t start, uint64_t last,
 				       uint64_t flags, uint64_t addr,
 				       dma_addr_t *pages_addr,
 				       struct dma_fence **fence)
 {
 	struct amdgpu_vm_update_params params;
-	void *owner = AMDGPU_FENCE_OWNER_VM;
+	enum amdgpu_sync_mode sync_mode;
 	int r;
 
 	memset(&params, 0, sizeof(params));
@@ -1585,9 +1585,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	params.direct = direct;
 	params.pages_addr = pages_addr;
 
-	/* sync to everything except eviction fences on unmapping */
+	/* Implicitly sync to command submissions in the same VM before
+	 * unmapping. Sync to moving fences before mapping.
+	 */
 	if (!(flags & AMDGPU_PTE_VALID))
-		owner = AMDGPU_FENCE_OWNER_KFD;
+		sync_mode = AMDGPU_SYNC_EQ_OWNER;
+	else
+		sync_mode = AMDGPU_SYNC_EXPLICIT;
 
 	amdgpu_vm_eviction_lock(vm);
 	if (vm->evicting) {
@@ -1595,7 +1599,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		goto error_unlock;
 	}
 
-	r = vm->update_funcs->prepare(&params, owner, exclusive);
+	r = vm->update_funcs->prepare(&params, resv, sync_mode);
 	if (r)
 		goto error_unlock;
 
@@ -1614,7 +1618,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
  *
  * @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
@@ -1630,7 +1634,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
-				      struct dma_fence *exclusive,
+				      struct dma_resv *resv,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
@@ -1706,7 +1710,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		}
 
 		last = min((uint64_t)mapping->last, start + max_entries - 1);
-		r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
+		r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
 						start, last, flags, addr,
 						dma_addr, fence);
 		if (r)
@@ -1745,7 +1749,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	dma_addr_t *pages_addr = NULL;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
-	struct dma_fence *exclusive, **last_update;
+	struct dma_fence **last_update;
+	struct dma_resv *resv;
 	uint64_t flags;
 	struct amdgpu_device *bo_adev = adev;
 	int r;
@@ -1753,7 +1758,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	if (clear || !bo) {
 		mem = NULL;
 		nodes = NULL;
-		exclusive = NULL;
+		resv = vm->root.base.bo->tbo.base.resv;
 	} else {
 		struct ttm_dma_tt *ttm;
 
@@ -1763,7 +1768,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = bo->tbo.moving;
+		resv = bo->tbo.base.resv;
 	}
 
 	if (bo) {
@@ -1773,7 +1778,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		flags = 0x0;
 	}
 
-	if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
+	if (clear || (bo && bo->tbo.base.resv ==
+		      vm->root.base.bo->tbo.base.resv))
 		last_update = &vm->last_update;
 	else
 		last_update = &bo_va->last_pt_update;
@@ -1787,7 +1793,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	}
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
-		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
+		r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
 					       mapping, flags, bo_adev, nodes,
 					       last_update);
 		if (r)
@@ -1982,6 +1988,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence)
 {
+	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
 	struct amdgpu_bo_va_mapping *mapping;
 	uint64_t init_pte_value = 0;
 	struct dma_fence *f = NULL;
@@ -1996,7 +2003,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 		    mapping->start < AMDGPU_GMC_HOLE_START)
 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
-		r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
+		r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
 						mapping->start, mapping->last,
 						init_pte_value, 0, NULL, &f);
 
 		amdgpu_vm_free_mapping(adev, vm, mapping, f);

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -227,8 +227,8 @@ struct amdgpu_vm_update_params {
 
 struct amdgpu_vm_update_funcs {
 	int (*map_table)(struct amdgpu_bo *bo);
-	int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
-		       struct dma_fence *exclusive);
+	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
+		       enum amdgpu_sync_mode sync_mode);
 	int (*update)(struct amdgpu_vm_update_params *p,
 		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
 		      unsigned count, uint32_t incr, uint64_t flags);

drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c

@@ -44,26 +44,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
  * Returns:
  * Negativ errno, 0 for success.
  */
-static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
-				 struct dma_fence *exclusive)
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
+				 struct dma_resv *resv,
+				 enum amdgpu_sync_mode sync_mode)
 {
-	int r;
-
-	/* Wait for any BO move to be completed */
-	if (exclusive) {
-		r = dma_fence_wait(exclusive, true);
-		if (unlikely(r))
-			return r;
-	}
-
-	/* Don't wait for submissions during page fault */
-	if (p->direct)
+	if (!resv)
 		return 0;
 
-	/* Wait for PT BOs to be idle. PTs share the same resv. object
-	 * as the root PD BO
-	 */
-	return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+	return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
 }
 
 /**

drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c

@@ -58,9 +58,9 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
  * Negativ errno, 0 for success.
  */
 static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
-				  void *owner, struct dma_fence *exclusive)
+				  struct dma_resv *resv,
+				  enum amdgpu_sync_mode sync_mode)
 {
-	struct amdgpu_bo *root = p->vm->root.base.bo;
 	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
 	int r;
 
@@ -70,17 +70,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 
 	p->num_dw_left = ndw;
 
-	/* Wait for moves to be completed */
-	r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
-	if (r)
-		return r;
-
-	/* Don't wait for any submissions during page fault handling */
-	if (p->direct)
+	if (!resv)
 		return 0;
 
-	return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
-				AMDGPU_SYNC_NE_OWNER, owner);
+	return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
 }
 
 /**