drm/amdgpu: rework vm_grab_id interface

This makes assigning VM IDs independent from the use of VM IDs.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Author:    Christian König <christian.koenig@amd.com>, 2015-07-20 16:09:40 +02:00
Committer: Alex Deucher
Commit:    7f8a5290f5 (parent fc8fa5e428)

3 changed files with 17 additions and 15 deletions
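
A rough caller-side sketch of what the rework means, condensed from the amdgpu_ib.c hunk below (surrounding context and the ring unlock on error are omitted here): previously the caller got a fence back from amdgpu_vm_grab_id() and had to add it to its sync object itself; now the sync object is passed in, the dependencies are added internally, and only an error code is returned.

	/* before: assigning an ID handed the dependency fence back to the caller */
	struct amdgpu_fence *vm_id_fence;
	vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
	r = amdgpu_sync_fence(adev, &ibs->sync, &vm_id_fence->base);

	/* after: dependencies are added to the sync object inside amdgpu_vm_grab_id() */
	r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
	if (r)
		return r;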

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -2294,8 +2294,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm,
					  struct list_head *head);
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *updates);

drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -165,9 +165,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 	if (vm) {
 		/* grab a vm id if necessary */
-		struct amdgpu_fence *vm_id_fence = NULL;
-		vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
-		r = amdgpu_sync_fence(adev, &ibs->sync, &vm_id_fence->base);
+		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
 		if (r) {
 			amdgpu_ring_unlock_undo(ring);
 			return r;

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -127,16 +127,16 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
- * @ring: ring we want to submit job to
  * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
  *
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
  *
- * Global and local mutex must be locked!
+ * Global mutex must be locked!
  */
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm)
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync)
 {
 	struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -148,7 +148,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
 	/* check if the id is still valid */
 	if (vm_id->id && vm_id->last_id_use &&
 	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
-		return NULL;
+		return 0;
 
 	/* we definately need to flush */
 	vm_id->pd_gpu_addr = ~0ll;
@@ -161,7 +161,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
 			/* found a free one */
 			vm_id->id = i;
 			trace_amdgpu_vm_grab_id(i, ring->idx);
-			return NULL;
+			return 0;
 		}
 
 		if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
@@ -172,15 +172,19 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
 	for (i = 0; i < 2; ++i) {
 		if (choices[i]) {
+			struct amdgpu_fence *fence;
+
+			fence = adev->vm_manager.active[choices[i]];
 			vm_id->id = choices[i];
 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-			return adev->vm_manager.active[choices[i]];
+			return amdgpu_sync_fence(ring->adev, sync, &fence->base);
 		}
 	}
 
 	/* should never happen */
 	BUG();
-	return NULL;
+	return -EINVAL;
 }
 
 /**