/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

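/*
 * Illustrative note (not part of the original code): INTERVAL_TREE_DEFINE()
 * above generates static helpers named amdgpu_vm_it_iter_first() and
 * amdgpu_vm_it_iter_next() for looking up mappings that overlap a GPU VA
 * range.  A minimal sketch of such a lookup, assuming the VM's interval tree
 * root is vm->va:
 *
 *	struct amdgpu_bo_va_mapping *mapping;
 *
 *	mapping = amdgpu_vm_it_iter_first(&vm->va, start, last);
 *	while (mapping) {
 *		// mapping->start .. mapping->last overlaps [start, last]
 *		mapping = amdgpu_vm_it_iter_next(mapping, start, last);
 *	}
 */
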
/**
 * struct amdgpu_pte_update_params - Local structure
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_pte_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @src: address where to copy page table entries from
	 */
	uint64_t src;

	/**
	 * @ib: indirect buffer to fill with commands
	 */
	struct amdgpu_ib *ib;

	/**
	 * @func: Function which actually does the update
	 */
	void (*func)(struct amdgpu_pte_update_params *params,
		     struct amdgpu_bo *bo, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping, used during VM update by CPU
	 */
	dma_addr_t *pages_addr;

	/**
	 * @kptr:
	 *
	 * Kernel pointer of PD/PT BO that needs to be updated,
	 * used during VM update by CPU
	 */
	void *kptr;
};

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = 0xff;

	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		shift = 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
		break;
	case AMDGPU_VM_PTB:
		shift = 0;
		break;
	default:
		dev_err(adev->dev, "the level%d isn't supported.\n", level);
	}

	return shift;
}

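/*
 * Worked example (illustrative only, assuming a four level configuration
 * with root_level == AMDGPU_VM_PDB2 and vm_manager.block_size == 9):
 *
 *	PDB2: 9 * 2 + 9 = 27	PDB1: 9 * 1 + 9 = 18
 *	PDB0: 9 * 0 + 9 =  9	PTB : 0
 *
 * i.e. each directory level selects 9 bits of the pfn and the page table
 * level addresses individual GPU pages.
 */
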
/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev,
					       adev->vm_manager.root_level);

	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;
	else
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
				       unsigned int level)
{
	if (level <= adev->vm_manager.root_level)
		return 0xffffffff;
	else if (level != AMDGPU_VM_PTB)
		return 0x1ff;
	else
		return AMDGPU_VM_PTE_COUNT(adev) - 1;
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}

/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	vm_bo->moved = true;
	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
	vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and whose change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and whose change has been updated
 * in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_del_init(&vm_bo->vm_status);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}

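/*
 * Summary of the per-VM BO state machine implemented by the helpers above
 * (derived from the list moves they perform): a BO that is not where it
 * should be sits on "evicted"; once validated it becomes "moved" (per-VM
 * BOs) or "relocated" (PDs/PTs that still need their parent PD updated);
 * after the page tables are updated it ends up on "idle".  Normal BOs that
 * change go to "invalidated" and are taken off that list again by
 * amdgpu_vm_bo_done() once the PTs reflect the change.
 */
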
/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	base->next = NULL;
	INIT_LIST_HEAD(&base->vm_status);

	if (!bo)
		return;
	base->next = bo->vm_bo;
	bo->vm_bo = base;

	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
		return;

	vm->bulk_moveable = false;
	if (bo->tbo.type == ttm_bo_type_kernel)
		amdgpu_vm_bo_relocated(base);
	else
		amdgpu_vm_bo_idle(base);

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
		return;

	/*
	 * we checked all the prerequisites, but it looks like this per vm bo
	 * is currently evicted. add the bo to the evicted list to make sure it
	 * is validated on next vm use to avoid fault.
	 */
	amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
{
	struct amdgpu_bo *parent = pt->base.bo->parent;

	if (!parent)
		return NULL;

	return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}

/**
 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
	uint64_t pfn;
	struct amdgpu_vm_pt *parent;
	struct amdgpu_vm_pt *entry;
	unsigned level;
};

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, uint64_t start,
			       struct amdgpu_vm_pt_cursor *cursor)
{
	cursor->pfn = start;
	cursor->parent = NULL;
	cursor->entry = &vm->root;
	cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned mask, shift, idx;

	if (!cursor->entry->entries)
		return false;

	BUG_ON(!cursor->entry->base.bo);
	mask = amdgpu_vm_entries_mask(adev, cursor->level);
	shift = amdgpu_vm_level_shift(adev, cursor->level);

	++cursor->level;
	idx = (cursor->pfn >> shift) & mask;
	cursor->parent = cursor->entry;
	cursor->entry = &cursor->entry->entries[idx];
	return true;
}

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
				 struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned shift, num_entries;

	/* Root doesn't have a sibling */
	if (!cursor->parent)
		return false;

	/* Go to our parents and see if we got a sibling */
	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);

	if (cursor->entry == &cursor->parent->entries[num_entries - 1])
		return false;

	cursor->pfn += 1ULL << shift;
	cursor->pfn &= ~((1ULL << shift) - 1);
	++cursor->entry;
	return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}

/**
 * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start addr of the walk
 * @cursor: state to initialize
 *
 * Start a walk and go directly to the leaf node.
 */
static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm, uint64_t start,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	amdgpu_vm_pt_start(adev, vm, start, cursor);
	while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next leaf node.
 */
static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	amdgpu_vm_pt_next(adev, cursor);
	if (cursor->pfn != ~0ll)
		while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
 */
#define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor)		\
	for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor));	\
	     (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))

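/*
 * Illustrative use only (see amdgpu_vm_alloc_pts() below for the real
 * caller): walking every page table leaf covering a pfn range needs just a
 * cursor on the stack.
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *
 *	for_each_amdgpu_vm_pt_leaf(adev, vm, start_pfn, end_pfn, cursor) {
 *		struct amdgpu_vm_pt *entry = cursor.entry;
 *		// entry covers cursor.pfn at cursor.level
 *	}
 */
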
/**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @cursor: state to initialize
 *
 * Starts a depth-first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	amdgpu_vm_pt_start(adev, vm, 0, cursor);
	while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth-first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor));
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/**
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)),			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     (entry); (entry) = (cursor).entry,					\
	     amdgpu_vm_pt_next_dfs((adev), &(cursor)))

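/*
 * Note on the "safe" variant above: the cursor is advanced to the next node
 * before the loop body sees the current entry, so the body may free the
 * entry (as amdgpu_vm_free_pts() below does) without breaking the walk.
 */
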
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->priority = 0;
	entry->tv.bo = &vm->root.base.bo->tbo;
	/* One for the VM updates and one for the CS job */
	entry->tv.num_shared = 2;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	struct amdgpu_vm_bo_base *bo_base;

	if (vm->bulk_moveable) {
		spin_lock(&glob->lru_lock);
		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
		spin_unlock(&glob->lru_lock);
		return;
	}

	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo_base, &vm->idle, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;

		if (!bo->parent)
			continue;

		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
		if (bo->shadow)
			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
						&vm->lru_bulk_move);
	}
	spin_unlock(&glob->lru_lock);

	vm->bulk_moveable = true;
}

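/*
 * Note (from the bulk-move changelog): amdgpu_vm_move_to_lru_tail() is meant
 * to be called after command submission, when all per-VM BOs are back on the
 * idle list, so the whole block can be cut out of the LRU and moved to its
 * tail in a single operation instead of one BO at a time.
 */
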
/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct amdgpu_vm_bo_base *bo_base, *tmp;
	int r = 0;

	vm->bulk_moveable &= list_empty(&vm->evicted);

	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;

		r = validate(param, bo);
		if (r)
			break;

		if (bo->tbo.type != ttm_bo_type_kernel) {
			amdgpu_vm_bo_moved(bo_base);
		} else {
			if (vm->use_cpu_for_update)
				r = amdgpu_bo_kmap(bo, NULL);
			else
				r = amdgpu_ttm_alloc_gart(&bo->tbo);
			if (r)
				break;
			if (bo->shadow) {
				r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
				if (r)
					break;
			}
			amdgpu_vm_bo_relocated(bo_base);
		}
	}

	return r;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if eviction list is empty.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	return list_empty(&vm->evicted);
}

/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @bo: BO to clear
 * @level: level this BO is at
 * @pte_support_ats: indicate ATS support from PTE
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
			      unsigned level, bool pte_support_ats)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct dma_fence *fence = NULL;
	unsigned entries, ats_entries;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	uint64_t addr;
	int r;

	entries = amdgpu_bo_size(bo) / 8;

	if (pte_support_ats) {
		if (level == adev->vm_manager.root_level) {
			ats_entries = amdgpu_vm_level_shift(adev, level);
			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
			ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
			ats_entries = min(ats_entries, entries);
			entries -= ats_entries;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	} else {
		ats_entries = 0;
	}

	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto error;

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r)
		return r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	if (ats_entries) {
		uint64_t ats_value;

		ats_value = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB)
			ats_value |= AMDGPU_PDE_PTE;

		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
				      ats_entries, 0, ats_value);
		addr += ats_entries * 8;
	}

	if (entries)
		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
				      entries, 0, 0);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
			      &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

	if (bo->shadow)
		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
					  level, pte_support_ats);

	return 0;

error_free:
	amdgpu_job_free(job);

error:
	return r;
}

/**
 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: page table level the BO will be used at
 * @bp: resulting BO allocation parameters
 */
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       int level, struct amdgpu_bo_param *bp)
{
	memset(bp, 0, sizeof(*bp));

	bp->size = amdgpu_vm_bo_size(adev, level);
	bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
	bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
	if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
	    adev->flags & AMD_IS_APU)
		bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
	bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
	bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	if (vm->use_cpu_for_update)
		bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else if (!vm->root.base.bo || vm->root.base.bo->shadow)
		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
	bp->type = ttm_bo_type_kernel;
	if (vm->root.base.bo)
		bp->resv = vm->root.base.bo->tbo.resv;
}

/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page directories and page tables are allocated
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_bo *pt;
	bool ats = false;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;

	if (vm->pte_support_ats)
		ats = saddr < AMDGPU_GMC_HOLE_START;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	if (eaddr >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			eaddr, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
		struct amdgpu_vm_pt *entry = cursor.entry;
		struct amdgpu_bo_param bp;

		if (cursor.level < AMDGPU_VM_PTB) {
			unsigned num_entries;

			num_entries = amdgpu_vm_num_entries(adev, cursor.level);
			entry->entries = kvmalloc_array(num_entries,
							sizeof(*entry->entries),
							GFP_KERNEL |
							__GFP_ZERO);
			if (!entry->entries)
				return -ENOMEM;
		}

		if (entry->base.bo)
			continue;

		amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);

		r = amdgpu_bo_create(adev, &bp, &pt);
		if (r)
			return r;

		r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
		if (r)
			goto error_free_pt;

		if (vm->use_cpu_for_update) {
			r = amdgpu_bo_kmap(pt, NULL);
			if (r)
				goto error_free_pt;
		}

		/* Keep a reference to the root directory to avoid
		 * freeing them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);

		amdgpu_vm_bo_base_init(&entry->base, vm, pt);
	}

	return 0;

error_free_pt:
	amdgpu_bo_unref(&pt->shadow);
	amdgpu_bo_unref(&pt);
	return r;
}

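/*
 * Usage sketch (illustrative only, not an actual caller): a caller holding
 * the root PD reservation that wants page tables to exist before mapping a
 * buffer at a given GPU virtual address would do roughly
 *
 *	r = amdgpu_vm_alloc_pts(adev, vm, mapping_start_va, mapping_size);
 *	if (r)
 *		return r;
 *
 * where mapping_start_va and mapping_size are hypothetical, GPU-page
 * aligned byte values.
 */
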
/**
 * amdgpu_vm_free_pts - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_pt *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {

		if (entry->base.bo) {
			entry->base.bo->vm_bo = NULL;
			list_del(&entry->base.vm_status);
			amdgpu_bo_unref(&entry->base.bo->shadow);
			amdgpu_bo_unref(&entry->base.bo);
		}
		kvfree(entry->entries);
	}

	BUG_ON(vm->root.base.bo);
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block;
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;

	has_compute_vm_bug = false;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
		else
			ring->has_compute_vm_bug = false;
	}
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
	bool gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

	if (job->vmid == 0)
		return false;
	id = &id_mgr->ids[job->vmid];
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);

	if (amdgpu_vmid_had_gpu_reset(adev, id))
		return true;

	return vm_flush_needed || gds_switch_needed;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	bool vm_flush_needed = job->vm_needs_flush;
	bool pasid_mapping_needed = id->pasid != job->pasid ||
		!id->pasid_mapping ||
		!dma_fence_is_signaled(id->pasid_mapping);
	struct dma_fence *fence = NULL;
	unsigned patch_offset = 0;
	int r;

	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
		pasid_mapping_needed = true;
	}

	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
	vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
		job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
		ring->funcs->emit_wreg;

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
		return 0;

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (vm_flush_needed) {
		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
	}

	if (pasid_mapping_needed)
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

	if (vm_flush_needed || pasid_mapping_needed) {
		r = amdgpu_fence_emit(ring, &fence, 0);
		if (r)
			return r;
	}

	if (vm_flush_needed) {
		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = dma_fence_get(fence);
		id->current_gpu_reset_count =
			atomic_read(&adev->gpu_reset_counter);
		mutex_unlock(&id_mgr->lock);
	}

	if (pasid_mapping_needed) {
		id->pasid = job->pasid;
		dma_fence_put(id->pasid_mapping);
		id->pasid_mapping = dma_fence_get(fence);
	}
	dma_fence_put(fence);

	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* The double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC:
	 * when mid-command-buffer preemption kicks in, the DE treats the
	 * packets following the COND_EXEC as NOPs while the CE keeps going,
	 * so a VM switch placed inside the conditional section would be
	 * skipped and the first IB would fault.
	 */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	return 0;
}

/**
|
|
|
|
* amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
|
|
|
|
*
|
|
|
|
* @vm: requested vm
|
|
|
|
* @bo: requested buffer object
|
|
|
|
*
|
2016-01-26 18:17:11 +07:00
|
|
|
* Find @bo inside the requested vm.
|
2015-04-21 03:55:21 +07:00
|
|
|
* Search inside the @bo's vm list for the requested vm.
|
|
|
|
* Returns the found bo_va or NULL if none is found
|
|
|
|
*
|
|
|
|
* Object has to be reserved!
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* Found bo_va or NULL.
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
|
|
|
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
|
|
|
|
struct amdgpu_bo *bo)
|
|
|
|
{
|
2018-09-11 01:02:46 +07:00
|
|
|
struct amdgpu_vm_bo_base *base;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2018-09-11 01:02:46 +07:00
|
|
|
for (base = bo->vm_bo; base; base = base->next) {
|
|
|
|
if (base->vm != vm)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
return container_of(base, struct amdgpu_bo_va, base);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2016-08-12 18:29:18 +07:00
|
|
|
* amdgpu_vm_do_set_ptes - helper to call the right asic function
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
2016-08-04 19:52:50 +07:00
|
|
|
* @params: see amdgpu_pte_update_params definition
|
2018-01-16 22:54:25 +07:00
|
|
|
* @bo: PD/PT to update
|
2015-04-21 03:55:21 +07:00
|
|
|
* @pe: addr of the page entry
|
|
|
|
* @addr: dst addr to write into pe
|
|
|
|
* @count: number of page entries to update
|
|
|
|
* @incr: increase next addr by incr bytes
|
|
|
|
* @flags: hw access flags
|
|
|
|
*
|
|
|
|
* Traces the parameters and calls the right asic functions
|
|
|
|
* to setup the page table using the DMA.
|
|
|
|
*/
|
2016-08-12 18:29:18 +07:00
|
|
|
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
|
2018-01-16 22:54:25 +07:00
|
|
|
struct amdgpu_bo *bo,
|
2016-08-12 18:29:18 +07:00
|
|
|
uint64_t pe, uint64_t addr,
|
|
|
|
unsigned count, uint32_t incr,
|
2016-09-21 15:19:19 +07:00
|
|
|
uint64_t flags)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
2018-01-16 22:54:25 +07:00
|
|
|
pe += amdgpu_bo_gpu_offset(bo);
|
2016-09-25 21:11:52 +07:00
|
|
|
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2016-08-12 18:29:18 +07:00
|
|
|
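/* Note (added, not from the original source): for very small updates it is
 * presumably cheaper to embed the PTE values directly in the IB than to emit
 * a SET_PTE_PDE style command with its fixed overhead, hence the count < 3
 * heuristic below.
 */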
if (count < 3) {
|
2016-08-12 16:33:30 +07:00
|
|
|
amdgpu_vm_write_pte(params->adev, params->ib, pe,
|
|
|
|
addr | flags, count, incr);
|
2015-04-21 03:55:21 +07:00
|
|
|
|
|
|
|
} else {
|
2016-08-04 20:02:49 +07:00
|
|
|
amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
|
2015-04-21 03:55:21 +07:00
|
|
|
count, incr, flags);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-12 18:29:18 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
|
|
|
|
*
|
|
|
|
* @params: see amdgpu_pte_update_params definition
|
2018-01-16 22:54:25 +07:00
|
|
|
* @bo: PD/PT to update
|
2016-08-12 18:29:18 +07:00
|
|
|
* @pe: addr of the page entry
|
|
|
|
* @addr: dst addr to write into pe
|
|
|
|
* @count: number of page entries to update
|
|
|
|
* @incr: increase next addr by incr bytes
|
|
|
|
* @flags: hw access flags
|
|
|
|
*
|
|
|
|
* Traces the parameters and calls the DMA function to copy the PTEs.
|
|
|
|
*/
|
|
|
|
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
|
2018-01-16 22:54:25 +07:00
|
|
|
struct amdgpu_bo *bo,
|
2016-08-12 18:29:18 +07:00
|
|
|
uint64_t pe, uint64_t addr,
|
|
|
|
unsigned count, uint32_t incr,
|
2016-09-21 15:19:19 +07:00
|
|
|
uint64_t flags)
|
2016-08-12 18:29:18 +07:00
|
|
|
{
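/* Descriptive note (added, not from the original source): params->src holds
 * the GPU address of the staging area where the PTE values were pre-written
 * by the CPU; addr advances in GPU-page-sized (4 KiB) steps and each staged
 * PTE is 8 bytes, hence the (addr >> 12) * 8 offset below.
 */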
|
2016-09-25 21:11:52 +07:00
|
|
|
uint64_t src = (params->src + (addr >> 12) * 8);
|
2016-08-12 18:29:18 +07:00
|
|
|
|
2018-01-16 22:54:25 +07:00
|
|
|
pe += amdgpu_bo_gpu_offset(bo);
|
2016-09-25 21:11:52 +07:00
|
|
|
trace_amdgpu_vm_copy_ptes(pe, src, count);
|
|
|
|
|
|
|
|
amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
|
2016-08-12 18:29:18 +07:00
|
|
|
}
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
/**
|
2015-11-30 19:26:07 +07:00
|
|
|
* amdgpu_vm_map_gart - Resolve gart mapping of addr
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
2015-11-30 19:26:07 +07:00
|
|
|
* @pages_addr: optional DMA address to use for lookup
|
2015-04-21 03:55:21 +07:00
|
|
|
* @addr: the unmapped addr
|
|
|
|
*
|
|
|
|
* Look up the physical address of the page that the pte resolves
|
2018-06-11 22:11:24 +07:00
|
|
|
* to.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* The pointer for the page table entry.
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
2016-08-12 16:33:30 +07:00
|
|
|
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
|
|
|
uint64_t result;
|
|
|
|
|
2016-08-12 16:33:30 +07:00
|
|
|
/* page table offset */
|
|
|
|
result = pages_addr[addr >> PAGE_SHIFT];
|
2015-11-30 19:26:07 +07:00
|
|
|
|
2016-08-12 16:33:30 +07:00
|
|
|
/* in case cpu page size != gpu page size */
|
|
|
|
result |= addr & (~PAGE_MASK);
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2015-11-30 19:26:07 +07:00
|
|
|
result &= 0xFFFFFFFFFFFFF000ULL;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-05-12 02:50:08 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
|
|
|
|
*
|
|
|
|
* @params: see amdgpu_pte_update_params definition
|
2018-01-16 22:54:25 +07:00
|
|
|
* @bo: PD/PT to update
|
2017-05-12 02:50:08 +07:00
|
|
|
* @pe: kmap addr of the page entry
|
|
|
|
* @addr: dst addr to write into pe
|
|
|
|
* @count: number of page entries to update
|
|
|
|
* @incr: increase next addr by incr bytes
|
|
|
|
* @flags: hw access flags
|
|
|
|
*
|
|
|
|
* Write count number of PT/PD entries directly.
|
|
|
|
*/
|
|
|
|
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
|
2018-01-16 22:54:25 +07:00
|
|
|
struct amdgpu_bo *bo,
|
2017-05-12 02:50:08 +07:00
|
|
|
uint64_t pe, uint64_t addr,
|
|
|
|
unsigned count, uint32_t incr,
|
|
|
|
uint64_t flags)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
2017-05-12 06:47:22 +07:00
|
|
|
uint64_t value;
|
2017-05-12 02:50:08 +07:00
|
|
|
|
2018-01-16 22:54:25 +07:00
|
|
|
pe += (unsigned long)amdgpu_bo_kptr(bo);
|
|
|
|
|
2017-07-11 22:15:37 +07:00
|
|
|
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
|
|
|
|
|
2017-05-12 02:50:08 +07:00
|
|
|
for (i = 0; i < count; i++) {
|
2017-05-12 06:47:22 +07:00
|
|
|
value = params->pages_addr ?
|
|
|
|
amdgpu_vm_map_gart(params->pages_addr, addr) :
|
|
|
|
addr;
|
2018-01-12 21:26:08 +07:00
|
|
|
amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
|
|
|
|
i, value, flags);
|
2017-05-12 02:50:08 +07:00
|
|
|
addr += incr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-11 22:11:24 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_wait_pd - Wait for PT BOs to be free.
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: related vm
|
|
|
|
* @owner: fence owner
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 on success, errno otherwise.
|
|
|
|
*/
|
2017-07-11 22:13:00 +07:00
|
|
|
static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
|
|
|
void *owner)
|
2017-05-12 02:50:08 +07:00
|
|
|
{
|
|
|
|
struct amdgpu_sync sync;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
amdgpu_sync_create(&sync);
|
2017-09-16 07:44:06 +07:00
|
|
|
amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
|
2017-05-12 02:50:08 +07:00
|
|
|
r = amdgpu_sync_wait(&sync, true);
|
|
|
|
amdgpu_sync_free(&sync);
|
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2018-08-30 20:55:54 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_update_func - helper to call update function
|
|
|
|
*
|
|
|
|
* Calls the update function for both the given BO and its shadow.
|
|
|
|
*/
|
|
|
|
static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
|
|
|
|
struct amdgpu_bo *bo,
|
|
|
|
uint64_t pe, uint64_t addr,
|
|
|
|
unsigned count, uint32_t incr,
|
|
|
|
uint64_t flags)
|
|
|
|
{
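/* Descriptive note (added, assumption): the shadow BO, when present, is a
 * system-memory copy of the page table kept for recovery after a GPU reset,
 * so it is written with the same values as the real BO.
 */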
|
|
|
|
if (bo->shadow)
|
|
|
|
params->func(params, bo->shadow, pe, addr, count, incr, flags);
|
|
|
|
params->func(params, bo, pe, addr, count, incr, flags);
|
|
|
|
}
|
|
|
|
|
2016-09-16 20:36:49 +07:00
|
|
|
/*
|
2017-12-01 01:08:05 +07:00
|
|
|
* amdgpu_vm_update_pde - update a single level in the hierarchy
|
2016-09-16 20:36:49 +07:00
|
|
|
*
|
2017-12-01 01:08:05 +07:00
|
|
|
* @param: parameters for the update
|
2016-09-16 20:36:49 +07:00
|
|
|
* @vm: requested vm
|
2016-10-12 20:13:52 +07:00
|
|
|
* @parent: parent directory
|
2017-12-01 01:08:05 +07:00
|
|
|
* @entry: entry to update
|
2016-09-16 20:36:49 +07:00
|
|
|
*
|
2017-12-01 01:08:05 +07:00
|
|
|
* Makes sure the requested entry in parent is up to date.
|
2016-09-16 20:36:49 +07:00
|
|
|
*/
|
2017-12-01 01:08:05 +07:00
|
|
|
static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
|
|
|
|
struct amdgpu_vm *vm,
|
|
|
|
struct amdgpu_vm_pt *parent,
|
|
|
|
struct amdgpu_vm_pt *entry)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
2018-01-16 22:54:25 +07:00
|
|
|
struct amdgpu_bo *bo = parent->base.bo, *pbo;
|
2017-11-29 19:27:26 +07:00
|
|
|
uint64_t pde, pt, flags;
|
|
|
|
unsigned level;
|
2015-07-21 15:52:10 +07:00
|
|
|
|
2017-12-01 01:08:05 +07:00
|
|
|
/* Don't update huge pages here */
|
|
|
|
if (entry->huge)
|
|
|
|
return;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2018-01-16 22:54:25 +07:00
|
|
|
for (level = 0, pbo = bo->parent; pbo; ++level)
|
2017-11-29 19:27:26 +07:00
|
|
|
pbo = pbo->parent;
|
|
|
|
|
2017-12-13 13:22:54 +07:00
|
|
|
level += params->adev->vm_manager.root_level;
|
2018-08-22 19:11:19 +07:00
|
|
|
amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
|
2018-01-16 22:54:25 +07:00
|
|
|
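/* Descriptive note (added): each directory entry is 8 bytes, so the byte
 * offset of this entry inside the parent is its index times 8.
 */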
pde = (entry - parent->entries) * 8;
|
2018-08-30 20:55:54 +07:00
|
|
|
amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
|
2017-05-12 21:09:26 +07:00
|
|
|
/*
|
2018-09-08 18:05:34 +07:00
|
|
|
* amdgpu_vm_invalidate_pds - mark all PDs as invalid
|
2017-05-12 21:09:26 +07:00
|
|
|
*
|
2018-06-11 22:11:24 +07:00
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: related vm
|
2017-05-12 21:09:26 +07:00
|
|
|
*
|
|
|
|
* Mark all PD levels as invalid after an error.
|
|
|
|
*/
|
2018-09-08 18:05:34 +07:00
|
|
|
static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_vm *vm)
|
2017-05-12 21:09:26 +07:00
|
|
|
{
|
2018-09-08 18:05:34 +07:00
|
|
|
struct amdgpu_vm_pt_cursor cursor;
|
|
|
|
struct amdgpu_vm_pt *entry;
|
2017-05-12 21:09:26 +07:00
|
|
|
|
2018-09-08 18:05:34 +07:00
|
|
|
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
|
|
|
|
if (entry->base.bo && !entry->base.moved)
|
2018-08-30 15:27:15 +07:00
|
|
|
amdgpu_vm_bo_relocated(&entry->base);
|
2017-05-12 21:09:26 +07:00
|
|
|
}
|
|
|
|
|
2016-10-12 20:13:52 +07:00
|
|
|
/*
|
|
|
|
* amdgpu_vm_update_directories - make sure that all directories are valid
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: requested vm
|
|
|
|
*
|
|
|
|
* Makes sure all directories are up to date.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, error for failure.
|
2016-10-12 20:13:52 +07:00
|
|
|
*/
|
|
|
|
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_vm *vm)
|
|
|
|
{
|
2017-12-01 01:08:05 +07:00
|
|
|
struct amdgpu_pte_update_params params;
|
|
|
|
struct amdgpu_job *job;
|
|
|
|
unsigned ndw = 0;
|
2017-09-30 15:14:13 +07:00
|
|
|
int r = 0;
|
2017-05-12 21:09:26 +07:00
|
|
|
|
2017-12-01 01:08:05 +07:00
|
|
|
if (list_empty(&vm->relocated))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
restart:
|
|
|
|
memset(¶ms, 0, sizeof(params));
|
|
|
|
params.adev = adev;
|
|
|
|
|
|
|
|
if (vm->use_cpu_for_update) {
|
|
|
|
r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
|
|
|
|
if (unlikely(r))
|
|
|
|
return r;
|
|
|
|
|
|
|
|
params.func = amdgpu_vm_cpu_set_ptes;
|
|
|
|
} else {
|
|
|
|
ndw = 512 * 8;
|
|
|
|
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
|
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
|
|
|
|
params.ib = &job->ibs[0];
|
|
|
|
params.func = amdgpu_vm_do_set_ptes;
|
|
|
|
}
|
|
|
|
|
2017-08-09 19:15:46 +07:00
|
|
|
while (!list_empty(&vm->relocated)) {
|
2017-12-01 01:08:05 +07:00
|
|
|
struct amdgpu_vm_pt *pt, *entry;
|
2017-08-09 19:15:46 +07:00
|
|
|
|
2018-08-30 20:55:54 +07:00
|
|
|
entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
|
|
|
|
base.vm_status);
|
|
|
|
amdgpu_vm_bo_idle(&entry->base);
|
2017-08-09 19:15:46 +07:00
|
|
|
|
2018-08-30 20:55:54 +07:00
|
|
|
pt = amdgpu_vm_pt_parent(entry);
|
|
|
|
if (!pt)
|
2017-12-01 01:08:05 +07:00
|
|
|
continue;
|
|
|
|
|
|
|
|
amdgpu_vm_update_pde(¶ms, vm, pt, entry);
|
|
|
|
|
|
|
|
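/* Descriptive note (added): if the SDMA IB is nearly full, stop here; the
 * partially filled job is submitted below and the restart label allocates a
 * fresh one for the remaining entries.
 */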
if (!vm->use_cpu_for_update &&
|
|
|
|
(ndw - params.ib->length_dw) < 32)
|
|
|
|
break;
|
2017-08-09 19:15:46 +07:00
|
|
|
}
|
2017-05-12 21:09:26 +07:00
|
|
|
|
2017-07-11 22:23:29 +07:00
|
|
|
if (vm->use_cpu_for_update) {
|
|
|
|
/* Flush HDP */
|
|
|
|
mb();
|
2018-01-19 20:17:40 +07:00
|
|
|
amdgpu_asic_flush_hdp(adev, NULL);
|
2017-12-01 01:08:05 +07:00
|
|
|
} else if (params.ib->length_dw == 0) {
|
|
|
|
amdgpu_job_free(job);
|
|
|
|
} else {
|
|
|
|
struct amdgpu_bo *root = vm->root.base.bo;
|
|
|
|
struct amdgpu_ring *ring;
|
|
|
|
struct dma_fence *fence;
|
|
|
|
|
2018-07-20 19:21:06 +07:00
|
|
|
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
|
2017-12-01 01:08:05 +07:00
|
|
|
sched);
|
|
|
|
|
|
|
|
amdgpu_ring_pad_ib(ring, params.ib);
|
|
|
|
amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
|
|
|
|
AMDGPU_FENCE_OWNER_VM, false);
|
|
|
|
WARN_ON(params.ib->length_dw > ndw);
|
2018-07-13 18:54:56 +07:00
|
|
|
r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
|
|
|
|
&fence);
|
2017-12-01 01:08:05 +07:00
|
|
|
if (r)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
amdgpu_bo_fence(root, fence, true);
|
|
|
|
dma_fence_put(vm->last_update);
|
|
|
|
vm->last_update = fence;
|
2017-07-11 22:23:29 +07:00
|
|
|
}
|
|
|
|
|
2017-12-01 01:08:05 +07:00
|
|
|
if (!list_empty(&vm->relocated))
|
|
|
|
goto restart;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error:
|
2018-09-08 18:05:34 +07:00
|
|
|
amdgpu_vm_invalidate_pds(adev, vm);
|
2017-12-01 01:08:05 +07:00
|
|
|
amdgpu_job_free(job);
|
2017-05-12 21:09:26 +07:00
|
|
|
return r;
|
2016-10-12 20:13:52 +07:00
|
|
|
}
|
|
|
|
|
2017-07-26 03:35:38 +07:00
|
|
|
/**
|
2018-09-06 22:13:06 +07:00
|
|
|
* amdgpu_vm_update_huge - figure out parameters for PTE updates
|
2017-07-26 03:35:38 +07:00
|
|
|
*
|
2018-09-06 22:13:06 +07:00
|
|
|
* Make sure to set the right flags for the PTEs at the desired level.
|
2017-07-26 03:35:38 +07:00
|
|
|
*/
|
2018-09-06 22:13:06 +07:00
|
|
|
static void amdgpu_vm_update_huge(struct amdgpu_pte_update_params *params,
|
|
|
|
struct amdgpu_bo *bo, unsigned level,
|
|
|
|
uint64_t pe, uint64_t addr,
|
|
|
|
unsigned count, uint32_t incr,
|
|
|
|
uint64_t flags)
|
2017-07-26 03:35:38 +07:00
|
|
|
|
2018-09-06 22:13:06 +07:00
|
|
|
{
|
|
|
|
if (level != AMDGPU_VM_PTB) {
|
2017-07-26 03:35:38 +07:00
|
|
|
flags |= AMDGPU_PDE_PTE;
|
2018-09-06 22:13:06 +07:00
|
|
|
amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
|
2017-07-26 03:35:38 +07:00
|
|
|
}
|
|
|
|
|
2018-09-06 22:13:06 +07:00
|
|
|
amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_fragment - get fragment for PTEs
|
|
|
|
*
|
|
|
|
* @params: see amdgpu_pte_update_params definition
|
|
|
|
* @start: first PTE to handle
|
|
|
|
* @end: last PTE to handle
|
|
|
|
* @flags: hw mapping flags
|
|
|
|
* @frag: resulting fragment size
|
|
|
|
* @frag_end: end of this fragment
|
|
|
|
*
|
|
|
|
* Returns the first possible fragment for the start and end address.
|
|
|
|
*/
|
|
|
|
static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
|
|
|
|
uint64_t start, uint64_t end, uint64_t flags,
|
|
|
|
unsigned int *frag, uint64_t *frag_end)
|
|
|
|
{
|
|
|
|
/**
|
|
|
|
* The MC L1 TLB supports variable sized pages, based on a fragment
|
|
|
|
* field in the PTE. When this field is set to a non-zero value, page
|
|
|
|
* granularity is increased from 4KB to (1 << (12 + frag)). The PTE
|
|
|
|
* flags are considered valid for all PTEs within the fragment range
|
|
|
|
* and corresponding mappings are assumed to be physically contiguous.
|
|
|
|
*
|
|
|
|
* The L1 TLB can store a single PTE for the whole fragment,
|
|
|
|
* significantly increasing the space available for translation
|
|
|
|
* caching. This leads to large improvements in throughput when the
|
|
|
|
* TLB is under pressure.
|
|
|
|
*
|
|
|
|
* The L2 TLB distributes small and large fragments into two
|
|
|
|
* asymmetric partitions. The large fragment cache is significantly
|
|
|
|
* larger. Thus, we try to use large fragments wherever possible.
|
|
|
|
* Userspace can support this by aligning virtual base address and
|
|
|
|
* allocation size to the fragment size.
|
2018-09-07 19:21:15 +07:00
|
|
|
*
|
|
|
|
* Starting with Vega10 the fragment size only controls the L1. The L2
|
|
|
|
* is now directly fed with small/huge/giant pages from the walker.
|
2018-09-06 22:13:06 +07:00
|
|
|
*/
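/* Illustrative example (added, not from the original source): with frag = 9
 * the page granularity is 1 << (12 + 9) bytes = 2 MiB, so a single L1 TLB
 * entry can stand in for 512 contiguous 4 KiB pages.
 */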
|
2018-09-07 19:21:15 +07:00
|
|
|
unsigned max_frag;
|
|
|
|
|
|
|
|
if (params->adev->asic_type < CHIP_VEGA10)
|
|
|
|
max_frag = params->adev->vm_manager.fragment_size;
|
|
|
|
else
|
|
|
|
max_frag = 31;
|
2018-09-06 22:13:06 +07:00
|
|
|
|
|
|
|
/* system pages are not physically contiguous */
|
2018-09-08 01:34:17 +07:00
|
|
|
if (params->src) {
|
2018-09-06 22:13:06 +07:00
|
|
|
*frag = 0;
|
|
|
|
*frag_end = end;
|
2017-08-04 00:24:06 +07:00
|
|
|
return;
|
2017-12-21 21:47:28 +07:00
|
|
|
}
|
2017-07-26 03:35:38 +07:00
|
|
|
|
2018-09-06 22:13:06 +07:00
|
|
|
/* This intentionally wraps around if no bit is set */
|
|
|
|
*frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
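/* Illustrative example (added): start = 0x1200 gives ffs(start) - 1 = 9 and
 * end - start = 0x400 gives fls64(end - start) - 1 = 10, so *frag becomes
 * min(9, 10) = 9 and the fragment ends at start + (1 << 9).
 */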
|
|
|
|
if (*frag >= max_frag) {
|
|
|
|
*frag = max_frag;
|
|
|
|
*frag_end = end & ~((1ULL << max_frag) - 1);
|
|
|
|
} else {
|
|
|
|
*frag_end = start + (1 << *frag);
|
|
|
|
}
|
2016-10-25 20:52:28 +07:00
|
|
|
}
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_update_ptes - make sure that page tables are valid
|
|
|
|
*
|
2016-08-04 19:52:50 +07:00
|
|
|
* @params: see amdgpu_pte_update_params definition
|
2015-04-21 03:55:21 +07:00
|
|
|
* @start: start of GPU address range
|
|
|
|
* @end: end of GPU address range
|
2016-06-07 05:13:26 +07:00
|
|
|
* @dst: destination address to map to, the next dst inside the function
|
2015-04-21 03:55:21 +07:00
|
|
|
* @flags: mapping flags
|
|
|
|
*
|
2016-01-26 18:17:11 +07:00
|
|
|
* Update the page tables in the range @start - @end.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, -EINVAL for failure.
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
2017-05-12 09:39:31 +07:00
|
|
|
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
|
2018-09-06 22:13:06 +07:00
|
|
|
uint64_t start, uint64_t end,
|
|
|
|
uint64_t dst, uint64_t flags)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
2017-03-29 15:08:32 +07:00
|
|
|
struct amdgpu_device *adev = params->adev;
|
2018-09-03 19:34:51 +07:00
|
|
|
struct amdgpu_vm_pt_cursor cursor;
|
2018-09-06 22:13:06 +07:00
|
|
|
uint64_t frag_start = start, frag_end;
|
|
|
|
unsigned int frag;
|
|
|
|
|
|
|
|
/* figure out the initial fragment */
|
|
|
|
amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2018-09-06 22:13:06 +07:00
|
|
|
/* walk over the address space and update the PTs */
|
|
|
|
amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
|
|
|
|
while (cursor.pfn < end) {
|
2018-09-03 19:34:51 +07:00
|
|
|
struct amdgpu_bo *pt = cursor.entry->base.bo;
|
2018-09-15 15:02:13 +07:00
|
|
|
unsigned shift, parent_shift, mask;
|
2018-09-06 22:13:06 +07:00
|
|
|
uint64_t incr, entry_end, pe_start;
|
2017-07-26 03:35:38 +07:00
|
|
|
|
2018-09-06 22:13:06 +07:00
|
|
|
if (!pt)
|
2017-07-26 03:35:38 +07:00
|
|
|
return -ENOENT;
|
2016-10-25 20:52:28 +07:00
|
|
|
|
2018-09-06 22:13:06 +07:00
|
|
|
/* The root level can't be a huge page */
|
|
|
|
if (cursor.level == adev->vm_manager.root_level) {
|
|
|
|
if (!amdgpu_vm_pt_descendant(adev, &cursor))
|
|
|
|
return -ENOENT;
|
2017-07-26 03:35:38 +07:00
|
|
|
continue;
|
2018-09-03 19:34:51 +07:00
|
|
|
}
|
2017-07-26 03:35:38 +07:00
|
|
|
|
2018-09-06 22:13:06 +07:00
|
|
|
/* If it isn't already handled it can't be a huge page */
|
|
|
|
if (cursor.entry->huge) {
|
|
|
|
/* Add the entry to the relocated list to update it. */
|
|
|
|
cursor.entry->huge = false;
|
|
|
|
amdgpu_vm_bo_relocated(&cursor.entry->base);
|
|
|
|
}
|
2016-08-05 18:56:35 +07:00
|
|
|
|
2018-09-06 22:13:06 +07:00
|
|
|
shift = amdgpu_vm_level_shift(adev, cursor.level);
|
|
|
|
parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
|
|
|
|
if (adev->asic_type < CHIP_VEGA10) {
|
|
|
|
/* No huge page support before GMC v9 */
|
|
|
|
if (cursor.level != AMDGPU_VM_PTB) {
|
|
|
|
if (!amdgpu_vm_pt_descendant(adev, &cursor))
|
|
|
|
return -ENOENT;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
} else if (frag < shift) {
|
|
|
|
/* We can't use this level when the fragment size is
|
|
|
|
* smaller than the address shift. Go to the next
|
|
|
|
* child entry and try again.
|
|
|
|
*/
|
|
|
|
if (!amdgpu_vm_pt_descendant(adev, &cursor))
|
|
|
|
return -ENOENT;
|
|
|
|
continue;
|
|
|
|
} else if (frag >= parent_shift) {
|
|
|
|
/* If the fragment size is even larger than the parent
|
|
|
|
* shift we should go up one level and check it again.
|
|
|
|
*/
|
|
|
|
if (!amdgpu_vm_pt_ancestor(&cursor))
|
|
|
|
return -ENOENT;
|
|
|
|
continue;
|
2017-08-30 12:01:19 +07:00
|
|
|
}
|
|
|
|
|
2018-09-06 22:13:06 +07:00
|
|
|
/* Looks good so far, calculate parameters for the update */
|
|
|
|
incr = AMDGPU_GPU_PAGE_SIZE << shift;
|
2018-09-15 15:02:13 +07:00
|
|
|
mask = amdgpu_vm_entries_mask(adev, cursor.level);
|
|
|
|
pe_start = ((cursor.pfn >> shift) & mask) * 8;
|
|
|
|
entry_end = (mask + 1) << shift;
|
2018-09-06 22:13:06 +07:00
|
|
|
entry_end += cursor.pfn & ~(entry_end - 1);
|
|
|
|
entry_end = min(entry_end, end);
|
|
|
|
|
|
|
|
do {
|
|
|
|
uint64_t upd_end = min(entry_end, frag_end);
|
|
|
|
unsigned nptes = (upd_end - frag_start) >> shift;
|
|
|
|
|
|
|
|
amdgpu_vm_update_huge(params, pt, cursor.level,
|
|
|
|
pe_start, dst, nptes, incr,
|
|
|
|
flags | AMDGPU_PTE_FRAG(frag));
|
|
|
|
|
|
|
|
pe_start += nptes * 8;
|
|
|
|
dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift;
|
|
|
|
|
|
|
|
frag_start = upd_end;
|
|
|
|
if (frag_start >= frag_end) {
|
|
|
|
/* figure out the next fragment */
|
|
|
|
amdgpu_vm_fragment(params, frag_start, end,
|
|
|
|
flags, &frag, &frag_end);
|
|
|
|
if (frag < shift)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} while (frag_start < entry_end);
|
2016-08-05 18:56:35 +07:00
|
|
|
|
2018-11-13 00:08:31 +07:00
|
|
|
if (amdgpu_vm_pt_descendant(adev, &cursor)) {
|
|
|
|
/* Mark all child entries as huge */
|
|
|
|
while (cursor.pfn < frag_start) {
|
|
|
|
cursor.entry->huge = true;
|
|
|
|
amdgpu_vm_pt_next(adev, &cursor);
|
|
|
|
}
|
|
|
|
|
|
|
|
} else if (frag >= shift) {
|
|
|
|
/* or just move on to the next on the same level. */
|
2018-09-06 22:13:06 +07:00
|
|
|
amdgpu_vm_pt_next(adev, &cursor);
|
2018-11-13 00:08:31 +07:00
|
|
|
}
|
2016-08-05 18:56:35 +07:00
|
|
|
}
|
2017-08-30 12:01:19 +07:00
|
|
|
|
|
|
|
return 0;
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
2016-06-06 15:17:58 +07:00
|
|
|
* @exclusive: fence we need to sync to
|
2016-03-19 03:00:35 +07:00
|
|
|
* @pages_addr: DMA addresses to use for mapping
|
2015-04-21 03:55:21 +07:00
|
|
|
* @vm: requested vm
|
2016-01-25 20:27:31 +07:00
|
|
|
* @start: start of mapped range
|
|
|
|
* @last: last mapped entry
|
|
|
|
* @flags: flags for the entries
|
2015-04-21 03:55:21 +07:00
|
|
|
* @addr: addr to set the area to
|
|
|
|
* @fence: optional resulting fence
|
|
|
|
*
|
2016-01-25 20:27:31 +07:00
|
|
|
* Fill in the page table entries between @start and @last.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, -EINVAL for failure.
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
|
|
|
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
|
2016-10-25 19:00:45 +07:00
|
|
|
struct dma_fence *exclusive,
|
2016-03-19 03:00:35 +07:00
|
|
|
dma_addr_t *pages_addr,
|
2015-04-21 03:55:21 +07:00
|
|
|
struct amdgpu_vm *vm,
|
2016-01-25 20:27:31 +07:00
|
|
|
uint64_t start, uint64_t last,
|
2016-09-21 15:19:19 +07:00
|
|
|
uint64_t flags, uint64_t addr,
|
2016-10-25 19:00:45 +07:00
|
|
|
struct dma_fence **fence)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
2016-02-08 23:37:38 +07:00
|
|
|
struct amdgpu_ring *ring;
|
2016-01-26 17:40:46 +07:00
|
|
|
void *owner = AMDGPU_FENCE_OWNER_VM;
|
2015-04-21 03:55:21 +07:00
|
|
|
unsigned nptes, ncmds, ndw;
|
2016-02-01 18:20:25 +07:00
|
|
|
struct amdgpu_job *job;
|
2016-08-04 19:52:50 +07:00
|
|
|
struct amdgpu_pte_update_params params;
|
2016-10-25 19:00:45 +07:00
|
|
|
struct dma_fence *f = NULL;
|
2015-04-21 03:55:21 +07:00
|
|
|
int r;
|
|
|
|
|
2016-08-12 18:29:18 +07:00
|
|
|
memset(¶ms, 0, sizeof(params));
|
|
|
|
params.adev = adev;
|
2016-10-13 20:09:08 +07:00
|
|
|
params.vm = vm;
|
2016-08-12 18:29:18 +07:00
|
|
|
|
2017-07-11 22:13:00 +07:00
|
|
|
/* sync to everything on unmapping */
|
|
|
|
if (!(flags & AMDGPU_PTE_VALID))
|
|
|
|
owner = AMDGPU_FENCE_OWNER_UNDEFINED;
|
|
|
|
|
2017-05-12 06:47:22 +07:00
|
|
|
if (vm->use_cpu_for_update) {
|
|
|
|
/* params.src is used as a flag to indicate system memory */
|
|
|
|
if (pages_addr)
|
|
|
|
params.src = ~0;
|
|
|
|
|
|
|
|
/* Wait for PT BOs to be free. PTs share the same reservation object
|
|
|
|
* as the root PD BO
|
|
|
|
*/
|
2017-07-11 22:13:00 +07:00
|
|
|
r = amdgpu_vm_wait_pd(adev, vm, owner);
|
2017-05-12 06:47:22 +07:00
|
|
|
if (unlikely(r))
|
|
|
|
return r;
|
|
|
|
|
|
|
|
params.func = amdgpu_vm_cpu_set_ptes;
|
|
|
|
params.pages_addr = pages_addr;
|
2018-09-06 22:13:06 +07:00
|
|
|
return amdgpu_vm_update_ptes(¶ms, start, last + 1,
|
|
|
|
addr, flags);
|
2017-05-12 06:47:22 +07:00
|
|
|
}
|
|
|
|
|
2018-07-20 19:21:06 +07:00
|
|
|
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
|
2016-08-04 20:02:49 +07:00
|
|
|
|
2016-01-25 20:27:31 +07:00
|
|
|
nptes = last - start + 1;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
|
|
|
/*
|
2017-09-07 18:23:21 +07:00
|
|
|
* reserve space for two commands every (1 << BLOCK_SIZE)
|
2015-04-21 03:55:21 +07:00
|
|
|
* entries or 2k dwords (whatever is smaller)
|
2017-09-07 18:23:21 +07:00
|
|
|
*
|
|
|
|
* The second command is for the shadow pagetables.
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
2017-12-29 12:13:08 +07:00
|
|
|
if (vm->root.base.bo->shadow)
|
|
|
|
ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
|
|
|
|
else
|
|
|
|
ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
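/* Illustrative example (added, not from the original source): with
 * block_size = 9 and nptes = 8192 this reserves (8192 >> 9) + 1 = 17
 * commands, doubled to 34 when a shadow page directory exists.
 */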
|
2015-04-21 03:55:21 +07:00
|
|
|
|
|
|
|
/* padding, etc. */
|
|
|
|
ndw = 64;
|
|
|
|
|
2017-08-30 20:38:45 +07:00
|
|
|
if (pages_addr) {
|
2016-08-11 19:06:54 +07:00
|
|
|
/* copy commands needed */
|
2017-09-19 23:58:15 +07:00
|
|
|
ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2016-08-11 19:06:54 +07:00
|
|
|
/* and also the PTEs themselves (64 bits each, i.e. two dwords) */
|
2015-04-21 03:55:21 +07:00
|
|
|
ndw += nptes * 2;
|
|
|
|
|
2016-08-12 18:29:18 +07:00
|
|
|
params.func = amdgpu_vm_do_copy_ptes;
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
} else {
|
|
|
|
/* set page commands needed */
|
2018-01-25 01:58:45 +07:00
|
|
|
ndw += ncmds * 10;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-08-30 12:01:19 +07:00
|
|
|
/* extra commands for begin/end fragments */
|
2018-06-08 15:36:22 +07:00
|
|
|
if (vm->root.base.bo->shadow)
|
|
|
|
ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
|
|
|
|
else
|
|
|
|
ndw += 2 * 10 * adev->vm_manager.fragment_size;
|
2016-08-12 18:29:18 +07:00
|
|
|
|
|
|
|
params.func = amdgpu_vm_do_set_ptes;
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
|
2016-02-01 18:20:25 +07:00
|
|
|
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
|
|
|
|
if (r)
|
2015-04-21 03:55:21 +07:00
|
|
|
return r;
|
2016-02-01 18:20:25 +07:00
|
|
|
|
2016-08-04 19:52:50 +07:00
|
|
|
params.ib = &job->ibs[0];
|
2015-07-21 15:52:10 +07:00
|
|
|
|
2017-08-30 20:38:45 +07:00
|
|
|
if (pages_addr) {
|
2016-08-11 19:06:54 +07:00
|
|
|
uint64_t *pte;
|
|
|
|
unsigned i;
|
|
|
|
|
|
|
|
/* Put the PTEs at the end of the IB. */
|
|
|
|
i = ndw - nptes * 2;
|
|
|
|
pte = (uint64_t *)&(job->ibs->ptr[i]);
|
|
|
|
params.src = job->ibs->gpu_addr + i * 4;
|
|
|
|
|
|
|
|
for (i = 0; i < nptes; ++i) {
|
|
|
|
pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
|
|
|
|
AMDGPU_GPU_PAGE_SIZE);
|
|
|
|
pte[i] |= flags;
|
|
|
|
}
|
2016-09-25 16:54:00 +07:00
|
|
|
addr = 0;
|
2016-08-11 19:06:54 +07:00
|
|
|
}
|
|
|
|
|
2017-11-14 02:47:52 +07:00
|
|
|
r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
|
2016-06-06 15:17:58 +07:00
|
|
|
if (r)
|
|
|
|
goto error_free;
|
|
|
|
|
2017-08-03 19:02:13 +07:00
|
|
|
r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
|
2017-09-16 07:44:06 +07:00
|
|
|
owner, false);
|
2016-01-26 17:40:46 +07:00
|
|
|
if (r)
|
|
|
|
goto error_free;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2018-09-06 22:13:06 +07:00
|
|
|
r = amdgpu_vm_update_ptes(¶ms, start, last + 1, addr, flags);
|
2017-05-12 09:39:31 +07:00
|
|
|
if (r)
|
|
|
|
goto error_free;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2016-08-04 19:52:50 +07:00
|
|
|
amdgpu_ring_pad_ib(ring, params.ib);
|
|
|
|
WARN_ON(params.ib->length_dw > ndw);
|
2018-07-13 18:54:56 +07:00
|
|
|
r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
|
2015-08-03 11:57:31 +07:00
|
|
|
if (r)
|
|
|
|
goto error_free;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-08-03 19:02:13 +07:00
|
|
|
amdgpu_bo_fence(vm->root.base.bo, f, true);
|
2017-01-30 17:09:31 +07:00
|
|
|
dma_fence_put(*fence);
|
|
|
|
*fence = f;
|
2015-04-21 03:55:21 +07:00
|
|
|
return 0;
|
2015-07-21 15:52:10 +07:00
|
|
|
|
|
|
|
error_free:
|
2016-02-01 18:20:25 +07:00
|
|
|
amdgpu_job_free(job);
|
2015-08-03 11:57:31 +07:00
|
|
|
return r;
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
|
2016-01-25 20:27:31 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
2016-06-06 15:17:58 +07:00
|
|
|
* @exclusive: fence we need to sync to
|
2016-03-30 15:50:25 +07:00
|
|
|
* @pages_addr: DMA addresses to use for mapping
|
2016-01-25 20:27:31 +07:00
|
|
|
* @vm: requested vm
|
|
|
|
* @mapping: mapped range and flags to use for the update
|
2016-03-30 15:50:25 +07:00
|
|
|
* @flags: HW flags for the mapping
|
2016-08-16 22:38:37 +07:00
|
|
|
* @nodes: array of drm_mm_nodes with the MC addresses
|
2016-01-25 20:27:31 +07:00
|
|
|
* @fence: optional resulting fence
|
|
|
|
*
|
|
|
|
* Split the mapping into smaller chunks so that each update fits
|
|
|
|
* into a SDMA IB.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, -EINVAL for failure.
|
2016-01-25 20:27:31 +07:00
|
|
|
*/
|
|
|
|
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
|
2016-10-25 19:00:45 +07:00
|
|
|
struct dma_fence *exclusive,
|
2016-03-30 15:50:25 +07:00
|
|
|
dma_addr_t *pages_addr,
|
2016-01-25 20:27:31 +07:00
|
|
|
struct amdgpu_vm *vm,
|
|
|
|
struct amdgpu_bo_va_mapping *mapping,
|
2016-09-21 15:19:19 +07:00
|
|
|
uint64_t flags,
|
2016-08-16 22:38:37 +07:00
|
|
|
struct drm_mm_node *nodes,
|
2016-10-25 19:00:45 +07:00
|
|
|
struct dma_fence **fence)
|
2016-01-25 20:27:31 +07:00
|
|
|
{
|
2017-09-18 18:58:30 +07:00
|
|
|
unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
|
2017-08-30 20:38:45 +07:00
|
|
|
uint64_t pfn, start = mapping->start;
|
2016-01-25 20:27:31 +07:00
|
|
|
int r;
|
|
|
|
|
|
|
|
/* normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
|
|
|
|
* but just in case, we filter the flags here first
|
|
|
|
*/
|
|
|
|
if (!(mapping->flags & AMDGPU_PTE_READABLE))
|
|
|
|
flags &= ~AMDGPU_PTE_READABLE;
|
|
|
|
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
|
|
|
|
flags &= ~AMDGPU_PTE_WRITEABLE;
|
|
|
|
|
2017-03-04 04:47:11 +07:00
|
|
|
flags &= ~AMDGPU_PTE_EXECUTABLE;
|
|
|
|
flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
|
|
|
|
|
2017-03-04 04:49:39 +07:00
|
|
|
flags &= ~AMDGPU_PTE_MTYPE_MASK;
|
|
|
|
flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
|
|
|
|
|
2017-04-19 08:53:29 +07:00
|
|
|
if ((mapping->flags & AMDGPU_PTE_PRT) &&
|
|
|
|
(adev->asic_type >= CHIP_VEGA10)) {
|
|
|
|
flags |= AMDGPU_PTE_PRT;
|
|
|
|
flags &= ~AMDGPU_PTE_VALID;
|
|
|
|
}
|
|
|
|
|
2016-01-25 20:27:31 +07:00
|
|
|
trace_amdgpu_vm_bo_update(mapping);
|
|
|
|
|
2016-08-16 22:38:37 +07:00
|
|
|
pfn = mapping->offset >> PAGE_SHIFT;
|
|
|
|
if (nodes) {
|
|
|
|
while (pfn >= nodes->size) {
|
|
|
|
pfn -= nodes->size;
|
|
|
|
++nodes;
|
|
|
|
}
|
2016-03-19 03:00:35 +07:00
|
|
|
}
|
2016-01-25 20:27:31 +07:00
|
|
|
|
2016-08-16 22:38:37 +07:00
|
|
|
do {
|
2017-09-18 18:58:30 +07:00
|
|
|
dma_addr_t *dma_addr = NULL;
|
2016-08-16 22:38:37 +07:00
|
|
|
uint64_t max_entries;
|
|
|
|
uint64_t addr, last;
|
2016-01-25 20:27:31 +07:00
|
|
|
|
2016-08-16 22:38:37 +07:00
|
|
|
if (nodes) {
|
|
|
|
addr = nodes->start << PAGE_SHIFT;
|
|
|
|
max_entries = (nodes->size - pfn) *
|
2018-06-22 23:54:03 +07:00
|
|
|
AMDGPU_GPU_PAGES_IN_CPU_PAGE;
|
2016-08-16 22:38:37 +07:00
|
|
|
} else {
|
|
|
|
addr = 0;
|
|
|
|
max_entries = S64_MAX;
|
|
|
|
}
|
2016-01-25 20:27:31 +07:00
|
|
|
|
2016-08-16 22:38:37 +07:00
|
|
|
if (pages_addr) {
|
2017-09-18 18:58:30 +07:00
|
|
|
uint64_t count;
|
|
|
|
|
2017-08-22 17:50:46 +07:00
|
|
|
max_entries = min(max_entries, 16ull * 1024ull);
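/* Descriptive note (added): count how many of the following CPU pages are
 * physically contiguous; a long enough run is mapped below with a single
 * linear address, while short runs fall back to the per-page path.
 */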
|
2018-06-21 16:27:46 +07:00
|
|
|
for (count = 1;
|
2018-06-22 23:54:03 +07:00
|
|
|
count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
|
2018-06-21 16:27:46 +07:00
|
|
|
++count) {
|
2017-09-18 18:58:30 +07:00
|
|
|
uint64_t idx = pfn + count;
|
|
|
|
|
|
|
|
if (pages_addr[idx] !=
|
|
|
|
(pages_addr[idx - 1] + PAGE_SIZE))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (count < min_linear_pages) {
|
|
|
|
addr = pfn << PAGE_SHIFT;
|
|
|
|
dma_addr = pages_addr;
|
|
|
|
} else {
|
|
|
|
addr = pages_addr[pfn];
|
2018-06-22 23:54:03 +07:00
|
|
|
max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
|
2017-09-18 18:58:30 +07:00
|
|
|
}
|
|
|
|
|
2016-08-16 22:38:37 +07:00
|
|
|
} else if (flags & AMDGPU_PTE_VALID) {
|
|
|
|
addr += adev->vm_manager.vram_base_offset;
|
2017-09-18 18:58:30 +07:00
|
|
|
addr += pfn << PAGE_SHIFT;
|
2016-08-16 22:38:37 +07:00
|
|
|
}
|
|
|
|
|
2017-03-30 19:03:59 +07:00
|
|
|
last = min((uint64_t)mapping->last, start + max_entries - 1);
|
2017-09-18 18:58:30 +07:00
|
|
|
r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
|
2016-01-25 20:27:31 +07:00
|
|
|
start, last, flags, addr,
|
|
|
|
fence);
|
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
|
2018-06-22 23:54:03 +07:00
|
|
|
pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
|
2016-08-16 22:38:37 +07:00
|
|
|
if (nodes && nodes->size == pfn) {
|
|
|
|
pfn = 0;
|
|
|
|
++nodes;
|
|
|
|
}
|
2016-01-25 20:27:31 +07:00
|
|
|
start = last + 1;
|
2016-08-16 22:38:37 +07:00
|
|
|
|
2017-03-30 19:03:59 +07:00
|
|
|
} while (unlikely(start != mapping->last + 1));
|
2016-01-25 20:27:31 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @bo_va: requested BO and VM object
|
2016-08-16 19:43:17 +07:00
|
|
|
* @clear: if true clear the entries
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
|
|
|
* Fill in the page table entries for @bo_va.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, -EINVAL for failure.
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
|
|
|
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_bo_va *bo_va,
|
2016-08-16 19:43:17 +07:00
|
|
|
bool clear)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
2017-08-01 15:51:43 +07:00
|
|
|
struct amdgpu_bo *bo = bo_va->base.bo;
|
|
|
|
struct amdgpu_vm *vm = bo_va->base.vm;
|
2015-04-21 03:55:21 +07:00
|
|
|
struct amdgpu_bo_va_mapping *mapping;
|
2016-03-30 15:50:25 +07:00
|
|
|
dma_addr_t *pages_addr = NULL;
|
2016-08-16 19:43:17 +07:00
|
|
|
struct ttm_mem_reg *mem;
|
2016-08-16 22:38:37 +07:00
|
|
|
struct drm_mm_node *nodes;
|
2017-09-11 21:54:59 +07:00
|
|
|
struct dma_fence *exclusive, **last_update;
|
2017-08-22 17:50:46 +07:00
|
|
|
uint64_t flags;
|
2015-04-21 03:55:21 +07:00
|
|
|
int r;
|
|
|
|
|
2018-07-04 17:08:54 +07:00
|
|
|
if (clear || !bo) {
|
2016-08-16 19:43:17 +07:00
|
|
|
mem = NULL;
|
2016-08-16 22:38:37 +07:00
|
|
|
nodes = NULL;
|
2016-08-16 19:43:17 +07:00
|
|
|
exclusive = NULL;
|
|
|
|
} else {
|
2016-03-30 15:50:25 +07:00
|
|
|
struct ttm_dma_tt *ttm;
|
|
|
|
|
2018-07-04 17:08:54 +07:00
|
|
|
mem = &bo->tbo.mem;
|
2016-08-16 22:38:37 +07:00
|
|
|
nodes = mem->mm_node;
|
|
|
|
if (mem->mem_type == TTM_PL_TT) {
|
2018-07-04 17:08:54 +07:00
|
|
|
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
|
2016-03-30 15:50:25 +07:00
|
|
|
pages_addr = ttm->dma_address;
|
2015-11-30 20:19:26 +07:00
|
|
|
}
|
2017-08-01 15:51:43 +07:00
|
|
|
exclusive = reservation_object_get_excl(bo->tbo.resv);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
|
2017-08-22 17:50:46 +07:00
|
|
|
if (bo)
|
2017-08-01 15:51:43 +07:00
|
|
|
flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
|
2017-08-22 17:50:46 +07:00
|
|
|
else
|
2017-01-30 17:01:38 +07:00
|
|
|
flags = 0x0;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-09-11 21:54:59 +07:00
|
|
|
if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
|
|
|
|
last_update = &vm->last_update;
|
|
|
|
else
|
|
|
|
last_update = &bo_va->last_pt_update;
|
|
|
|
|
2017-08-23 21:13:33 +07:00
|
|
|
if (!clear && bo_va->base.moved) {
|
|
|
|
bo_va->base.moved = false;
|
2015-07-30 16:53:42 +07:00
|
|
|
list_splice_init(&bo_va->valids, &bo_va->invalids);
|
2017-08-23 21:13:33 +07:00
|
|
|
|
2017-08-15 22:08:12 +07:00
|
|
|
} else if (bo_va->cleared != clear) {
|
|
|
|
list_splice_init(&bo_va->valids, &bo_va->invalids);
|
2017-08-23 21:13:33 +07:00
|
|
|
}
|
2015-07-30 16:53:42 +07:00
|
|
|
|
|
|
|
list_for_each_entry(mapping, &bo_va->invalids, list) {
|
2017-08-22 17:50:46 +07:00
|
|
|
r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
|
2016-08-16 22:38:37 +07:00
|
|
|
mapping, flags, nodes,
|
2017-09-11 21:54:59 +07:00
|
|
|
last_update);
|
2015-04-21 03:55:21 +07:00
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2017-08-15 22:08:12 +07:00
|
|
|
if (vm->use_cpu_for_update) {
|
|
|
|
/* Flush HDP */
|
|
|
|
mb();
|
2018-01-19 20:17:40 +07:00
|
|
|
amdgpu_asic_flush_hdp(adev, NULL);
|
2015-09-28 17:00:23 +07:00
|
|
|
}
|
|
|
|
|
2018-04-19 12:17:26 +07:00
|
|
|
/* If the BO is not in its preferred location add it back to
|
|
|
|
* the evicted list so that it gets validated again on the
|
|
|
|
* next command submission.
|
|
|
|
*/
|
2018-04-19 20:01:12 +07:00
|
|
|
if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
|
|
|
|
uint32_t mem_type = bo->tbo.mem.mem_type;
|
|
|
|
|
|
|
|
if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
|
2018-08-30 15:27:15 +07:00
|
|
|
amdgpu_vm_bo_evicted(&bo_va->base);
|
2018-04-19 20:01:12 +07:00
|
|
|
else
|
2018-08-30 15:27:15 +07:00
|
|
|
amdgpu_vm_bo_idle(&bo_va->base);
|
2018-09-01 18:25:31 +07:00
|
|
|
} else {
|
2018-08-30 15:27:15 +07:00
|
|
|
amdgpu_vm_bo_done(&bo_va->base);
|
2018-04-19 20:01:12 +07:00
|
|
|
}
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-08-15 22:08:12 +07:00
|
|
|
list_splice_init(&bo_va->invalids, &bo_va->valids);
|
|
|
|
bo_va->cleared = clear;
|
|
|
|
|
|
|
|
if (trace_amdgpu_vm_bo_mapping_enabled()) {
|
|
|
|
list_for_each_entry(mapping, &bo_va->valids, list)
|
|
|
|
trace_amdgpu_vm_bo_mapping(mapping);
|
2017-07-11 22:23:29 +07:00
|
|
|
}
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-01-30 17:09:31 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_update_prt_state - update the global PRT state
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
2017-01-30 17:09:31 +07:00
|
|
|
*/
|
|
|
|
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
bool enable;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
|
2017-02-14 22:02:52 +07:00
|
|
|
enable = !!atomic_read(&adev->vm_manager.num_prt_users);
|
2018-01-12 21:26:08 +07:00
|
|
|
adev->gmc.gmc_funcs->set_prt(adev, enable);
|
2017-01-30 17:09:31 +07:00
|
|
|
spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
|
|
|
|
}
|
|
|
|
|
2017-02-14 22:02:52 +07:00
|
|
|
/**
|
2017-03-13 16:13:36 +07:00
|
|
|
* amdgpu_vm_prt_get - add a PRT user
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
2017-02-14 22:02:52 +07:00
|
|
|
*/
|
|
|
|
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
|
|
|
|
{
|
2018-01-12 21:26:08 +07:00
|
|
|
if (!adev->gmc.gmc_funcs->set_prt)
|
2017-03-13 16:13:36 +07:00
|
|
|
return;
|
|
|
|
|
2017-02-14 22:02:52 +07:00
|
|
|
if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
|
|
|
|
amdgpu_vm_update_prt_state(adev);
|
|
|
|
}
|
|
|
|
|
2017-02-14 21:47:03 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_prt_put - drop a PRT user
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
2017-02-14 21:47:03 +07:00
|
|
|
*/
|
|
|
|
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
|
|
|
|
{
|
2017-02-14 22:02:52 +07:00
|
|
|
if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
|
2017-02-14 21:47:03 +07:00
|
|
|
amdgpu_vm_update_prt_state(adev);
|
|
|
|
}
|
|
|
|
|
2017-01-30 17:09:31 +07:00
|
|
|
/**
|
2017-02-14 22:02:52 +07:00
|
|
|
* amdgpu_vm_prt_cb - callback for updating the PRT status
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* @fence: fence for the callback
|
2018-06-14 03:01:38 +07:00
|
|
|
* @_cb: the callback function
|
2017-01-30 17:09:31 +07:00
|
|
|
*/
|
|
|
|
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
|
|
|
|
{
|
|
|
|
struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
|
|
|
|
|
2017-02-14 21:47:03 +07:00
|
|
|
amdgpu_vm_prt_put(cb->adev);
|
2017-01-30 17:09:31 +07:00
|
|
|
kfree(cb);
|
|
|
|
}
|
|
|
|
|
2017-02-14 22:02:52 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_add_prt_cb - add callback for updating the PRT status
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @fence: fence for the callback
|
2017-02-14 22:02:52 +07:00
|
|
|
*/
|
|
|
|
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
|
|
|
|
struct dma_fence *fence)
|
|
|
|
{
|
2017-03-13 16:13:36 +07:00
|
|
|
struct amdgpu_prt_cb *cb;
|
2017-02-14 22:02:52 +07:00
|
|
|
|
2018-01-12 21:26:08 +07:00
|
|
|
if (!adev->gmc.gmc_funcs->set_prt)
|
2017-03-13 16:13:36 +07:00
|
|
|
return;
|
|
|
|
|
|
|
|
cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
|
2017-02-14 22:02:52 +07:00
|
|
|
if (!cb) {
|
|
|
|
/* Last resort when we are OOM */
|
|
|
|
if (fence)
|
|
|
|
dma_fence_wait(fence, false);
|
|
|
|
|
2017-04-04 01:41:39 +07:00
|
|
|
amdgpu_vm_prt_put(adev);
|
2017-02-14 22:02:52 +07:00
|
|
|
} else {
|
|
|
|
cb->adev = adev;
|
|
|
|
if (!fence || dma_fence_add_callback(fence, &cb->cb,
|
|
|
|
amdgpu_vm_prt_cb))
|
|
|
|
amdgpu_vm_prt_cb(fence, &cb->cb);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-30 17:09:31 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_free_mapping - free a mapping
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: requested vm
|
|
|
|
* @mapping: mapping to be freed
|
|
|
|
* @fence: fence of the unmap operation
|
|
|
|
*
|
|
|
|
* Free a mapping and make sure we decrease the PRT usage count if applicable.
|
|
|
|
*/
|
|
|
|
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_vm *vm,
|
|
|
|
struct amdgpu_bo_va_mapping *mapping,
|
|
|
|
struct dma_fence *fence)
|
|
|
|
{
|
2017-02-14 22:02:52 +07:00
|
|
|
if (mapping->flags & AMDGPU_PTE_PRT)
|
|
|
|
amdgpu_vm_add_prt_cb(adev, fence);
|
|
|
|
kfree(mapping);
|
|
|
|
}
|
2017-01-30 17:09:31 +07:00
|
|
|
|
2017-02-14 22:02:52 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_prt_fini - finish all prt mappings
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: requested vm
|
|
|
|
*
|
|
|
|
* Register a cleanup callback to disable PRT support after VM dies.
|
|
|
|
*/
|
|
|
|
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
|
|
|
{
|
2017-08-03 19:02:13 +07:00
|
|
|
struct reservation_object *resv = vm->root.base.bo->tbo.resv;
|
2017-02-14 22:02:52 +07:00
|
|
|
struct dma_fence *excl, **shared;
|
|
|
|
unsigned i, shared_count;
|
|
|
|
int r;
|
2017-02-14 21:47:03 +07:00
|
|
|
|
2017-02-14 22:02:52 +07:00
|
|
|
r = reservation_object_get_fences_rcu(resv, &excl,
|
|
|
|
&shared_count, &shared);
|
|
|
|
if (r) {
|
|
|
|
/* Not enough memory to grab the fence list, as last resort
|
|
|
|
* block for all the fences to complete.
|
|
|
|
*/
|
|
|
|
reservation_object_wait_timeout_rcu(resv, true, false,
|
|
|
|
MAX_SCHEDULE_TIMEOUT);
|
|
|
|
return;
|
2017-01-30 17:09:31 +07:00
|
|
|
}
|
2017-02-14 22:02:52 +07:00
|
|
|
|
|
|
|
/* Add a callback for each fence in the reservation object */
|
|
|
|
amdgpu_vm_prt_get(adev);
|
|
|
|
amdgpu_vm_add_prt_cb(adev, excl);
|
|
|
|
|
|
|
|
for (i = 0; i < shared_count; ++i) {
|
|
|
|
amdgpu_vm_prt_get(adev);
|
|
|
|
amdgpu_vm_add_prt_cb(adev, shared[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(shared);
|
2017-01-30 17:09:31 +07:00
|
|
|
}
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_clear_freed - clear freed BOs in the PT
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: requested vm
|
2017-03-24 01:36:31 +07:00
|
|
|
* @fence: optional resulting fence (unchanged if no work needed to be done
|
|
|
|
* or if an error occurred)
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
|
|
|
* Make sure all freed BOs are cleared in the PT.
|
|
|
|
* PTs have to be reserved and mutex must be locked!
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success.
|
|
|
|
*
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
|
|
|
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
|
2017-03-24 01:36:31 +07:00
|
|
|
struct amdgpu_vm *vm,
|
|
|
|
struct dma_fence **fence)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
|
|
|
struct amdgpu_bo_va_mapping *mapping;
|
2018-01-26 00:36:15 +07:00
|
|
|
uint64_t init_pte_value = 0;
|
2017-03-24 01:36:31 +07:00
|
|
|
struct dma_fence *f = NULL;
|
2015-04-21 03:55:21 +07:00
|
|
|
int r;
|
|
|
|
|
|
|
|
while (!list_empty(&vm->freed)) {
|
|
|
|
mapping = list_first_entry(&vm->freed,
|
|
|
|
struct amdgpu_bo_va_mapping, list);
|
|
|
|
list_del(&mapping->list);
|
2016-03-08 23:52:01 +07:00
|
|
|
|
2018-08-27 23:22:31 +07:00
|
|
|
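/* Descriptive note (added, assumption): with ATS support the lower half of
 * the address space is expected to default to AMDGPU_PTE_DEFAULT_ATC rather
 * than an empty PTE, so freed mappings below the GMC hole are reset to that
 * value instead of 0.
 */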
if (vm->pte_support_ats &&
|
|
|
|
mapping->start < AMDGPU_GMC_HOLE_START)
|
2017-09-01 02:55:00 +07:00
|
|
|
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
|
2017-07-27 23:48:22 +07:00
|
|
|
|
2017-08-30 20:38:45 +07:00
|
|
|
r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
|
2017-04-19 19:41:19 +07:00
|
|
|
mapping->start, mapping->last,
|
2017-07-27 23:48:22 +07:00
|
|
|
init_pte_value, 0, &f);
|
2017-03-24 01:36:31 +07:00
|
|
|
amdgpu_vm_free_mapping(adev, vm, mapping, f);
|
2017-01-30 17:09:31 +07:00
|
|
|
if (r) {
|
2017-03-24 01:36:31 +07:00
|
|
|
dma_fence_put(f);
|
2015-04-21 03:55:21 +07:00
|
|
|
return r;
|
2017-01-30 17:09:31 +07:00
|
|
|
}
|
2017-03-24 01:36:31 +07:00
|
|
|
}
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-03-24 01:36:31 +07:00
|
|
|
if (fence && f) {
|
|
|
|
dma_fence_put(*fence);
|
|
|
|
*fence = f;
|
|
|
|
} else {
|
|
|
|
dma_fence_put(f);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
2017-03-24 01:36:31 +07:00
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2017-08-16 16:13:48 +07:00
|
|
|
* amdgpu_vm_handle_moved - handle moved BOs in the PT
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: requested vm
|
|
|
|
*
|
2017-08-16 16:13:48 +07:00
|
|
|
* Make sure all BOs which are moved are updated in the PTs.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success.
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
2017-08-16 16:13:48 +07:00
|
|
|
* PTs have to be reserved!
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
2017-08-16 16:13:48 +07:00
|
|
|
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
|
2017-09-11 21:54:59 +07:00
|
|
|
struct amdgpu_vm *vm)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
2018-04-19 16:08:24 +07:00
|
|
|
struct amdgpu_bo_va *bo_va, *tmp;
|
2018-09-01 18:25:31 +07:00
|
|
|
struct reservation_object *resv;
|
2017-08-16 16:13:48 +07:00
|
|
|
bool clear;
|
2018-04-19 16:08:24 +07:00
|
|
|
int r;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2018-09-01 18:25:31 +07:00
|
|
|
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
|
|
|
|
/* Per VM BOs never need to be cleared in the page tables */
|
|
|
|
r = amdgpu_vm_bo_update(adev, bo_va, false);
|
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
}
|
2016-03-09 00:03:27 +07:00
|
|
|
|
2018-09-01 18:25:31 +07:00
|
|
|
spin_lock(&vm->invalidated_lock);
|
|
|
|
while (!list_empty(&vm->invalidated)) {
|
|
|
|
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
|
|
|
|
base.vm_status);
|
|
|
|
resv = bo_va->base.bo->tbo.resv;
|
|
|
|
spin_unlock(&vm->invalidated_lock);
|
2017-09-02 01:34:27 +07:00
|
|
|
|
|
|
|
/* Try to reserve the BO to avoid clearing its ptes */
|
2018-09-01 18:25:31 +07:00
|
|
|
if (!amdgpu_vm_debug && reservation_object_trylock(resv))
|
2017-09-02 01:34:27 +07:00
|
|
|
clear = false;
|
|
|
|
/* Somebody else is using the BO right now */
|
|
|
|
else
|
|
|
|
clear = true;
|
2017-08-16 16:13:48 +07:00
|
|
|
|
|
|
|
r = amdgpu_vm_bo_update(adev, bo_va, clear);
|
2018-09-01 18:25:31 +07:00
|
|
|
if (r)
|
2015-04-21 03:55:21 +07:00
|
|
|
return r;
|
|
|
|
|
2018-09-01 18:25:31 +07:00
|
|
|
if (!clear)
|
2017-09-02 01:34:27 +07:00
|
|
|
reservation_object_unlock(resv);
|
2018-09-01 18:25:31 +07:00
|
|
|
spin_lock(&vm->invalidated_lock);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
2018-09-01 18:25:31 +07:00
|
|
|
spin_unlock(&vm->invalidated_lock);
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2018-04-19 16:08:24 +07:00
|
|
|
return 0;
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_add - add a bo to a specific vm
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: requested vm
|
|
|
|
* @bo: amdgpu buffer object
|
|
|
|
*
|
2016-01-26 18:17:11 +07:00
|
|
|
* Add @bo into the requested vm.
|
2015-04-21 03:55:21 +07:00
|
|
|
* Add @bo to the list of bos associated with the vm
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* Newly added bo_va or NULL for failure
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
|
|
|
* Object has to be reserved!
|
|
|
|
*/
|
|
|
|
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_vm *vm,
|
|
|
|
struct amdgpu_bo *bo)
|
|
|
|
{
|
|
|
|
struct amdgpu_bo_va *bo_va;
|
|
|
|
|
|
|
|
bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
|
|
|
|
if (bo_va == NULL) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2018-04-24 11:14:39 +07:00
|
|
|
amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
|
2017-08-01 15:51:43 +07:00
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
bo_va->ref_count = 1;
|
2015-07-30 16:53:42 +07:00
|
|
|
INIT_LIST_HEAD(&bo_va->valids);
|
|
|
|
INIT_LIST_HEAD(&bo_va->invalids);
|
2016-03-09 00:03:27 +07:00
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
return bo_va;
|
|
|
|
}
|
|
|
|
|
2017-08-16 16:13:48 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_insert_map - insert a new mapping
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @bo_va: bo_va to store the address
|
|
|
|
* @mapping: the mapping to insert
|
|
|
|
*
|
|
|
|
* Insert a new mapping into all structures.
|
|
|
|
*/
|
|
|
|
static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_bo_va *bo_va,
|
|
|
|
struct amdgpu_bo_va_mapping *mapping)
|
|
|
|
{
|
|
|
|
struct amdgpu_vm *vm = bo_va->base.vm;
|
|
|
|
struct amdgpu_bo *bo = bo_va->base.bo;
|
|
|
|
|
2017-09-06 21:55:16 +07:00
|
|
|
mapping->bo_va = bo_va;
|
2017-08-16 16:13:48 +07:00
|
|
|
list_add(&mapping->list, &bo_va->invalids);
|
|
|
|
amdgpu_vm_it_insert(mapping, &vm->va);
|
|
|
|
|
|
|
|
if (mapping->flags & AMDGPU_PTE_PRT)
|
|
|
|
amdgpu_vm_prt_get(adev);
|
|
|
|
|
2018-04-19 19:22:56 +07:00
|
|
|
if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
|
|
|
|
!bo_va->base.moved) {
|
|
|
|
list_move(&bo_va->base.vm_status, &vm->moved);
|
2017-08-16 16:13:48 +07:00
|
|
|
}
|
|
|
|
trace_amdgpu_vm_bo_map(bo_va, mapping);
|
|
|
|
}
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_map - map bo inside a vm
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @bo_va: bo_va to store the address
|
|
|
|
* @saddr: where to map the BO
|
|
|
|
* @offset: requested offset in the BO
|
2018-06-14 03:01:38 +07:00
|
|
|
* @size: BO size in bytes
|
2015-04-21 03:55:21 +07:00
|
|
|
* @flags: attributes of pages (read/write/valid/etc.)
|
|
|
|
*
|
|
|
|
* Add a mapping of the BO at the specified addr into the VM.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, error for failure.
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
2015-11-13 13:18:38 +07:00
|
|
|
* Object has to be reserved and unreserved outside!
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
|
|
|
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_bo_va *bo_va,
|
|
|
|
uint64_t saddr, uint64_t offset,
|
2017-01-18 20:49:43 +07:00
|
|
|
uint64_t size, uint64_t flags)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
2017-03-30 19:03:59 +07:00
|
|
|
struct amdgpu_bo_va_mapping *mapping, *tmp;
|
2017-08-01 15:51:43 +07:00
|
|
|
struct amdgpu_bo *bo = bo_va->base.bo;
|
|
|
|
struct amdgpu_vm *vm = bo_va->base.vm;
|
2015-04-21 03:55:21 +07:00
|
|
|
uint64_t eaddr;
|
|
|
|
|
2015-05-18 19:37:27 +07:00
|
|
|
/* validate the parameters */
|
|
|
|
if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
|
2015-11-13 13:18:38 +07:00
|
|
|
size == 0 || size & AMDGPU_GPU_PAGE_MASK)
|
2015-05-18 19:37:27 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
/* make sure object fit at this offset */
|
2015-11-24 05:43:48 +07:00
|
|
|
eaddr = saddr + size - 1;
|
2017-01-30 17:01:38 +07:00
|
|
|
if (saddr >= eaddr ||
|
2017-08-01 15:51:43 +07:00
|
|
|
(bo && offset + size > amdgpu_bo_size(bo)))
|
2015-04-21 03:55:21 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
saddr /= AMDGPU_GPU_PAGE_SIZE;
|
|
|
|
eaddr /= AMDGPU_GPU_PAGE_SIZE;
|
|
|
|
|
2017-03-30 19:03:59 +07:00
|
|
|
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
|
|
|
|
if (tmp) {
|
2015-04-21 03:55:21 +07:00
|
|
|
/* bo and tmp overlap, invalid addr */
|
|
|
|
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
|
2017-08-01 15:51:43 +07:00
|
|
|
"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
|
2017-03-30 19:03:59 +07:00
|
|
|
tmp->start, tmp->last + 1);
|
2017-03-13 16:13:37 +07:00
|
|
|
return -EINVAL;
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
|
2017-03-13 16:13:37 +07:00
|
|
|
if (!mapping)
|
|
|
|
return -ENOMEM;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-03-30 19:03:59 +07:00
|
|
|
mapping->start = saddr;
|
|
|
|
mapping->last = eaddr;
|
2015-04-21 03:55:21 +07:00
|
|
|
mapping->offset = offset;
|
|
|
|
mapping->flags = flags;
|
|
|
|
|
2017-08-16 16:13:48 +07:00
|
|
|
amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
|
2017-03-13 16:13:39 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @bo_va: bo_va to store the address
|
|
|
|
* @saddr: where to map the BO
|
|
|
|
* @offset: requested offset in the BO
|
2018-06-14 03:01:38 +07:00
|
|
|
* @size: BO size in bytes
|
2017-03-13 16:13:39 +07:00
|
|
|
* @flags: attributes of pages (read/write/valid/etc.)
|
|
|
|
*
|
|
|
|
* Add a mapping of the BO at the specified addr into the VM. Replace existing
|
|
|
|
* mappings as we do so.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, error for failure.
|
2017-03-13 16:13:39 +07:00
|
|
|
*
|
|
|
|
* Object has to be reserved and unreserved outside!
|
|
|
|
*/
|
|
|
|
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_bo_va *bo_va,
|
|
|
|
uint64_t saddr, uint64_t offset,
|
|
|
|
uint64_t size, uint64_t flags)
|
|
|
|
{
|
|
|
|
struct amdgpu_bo_va_mapping *mapping;
|
2017-08-01 15:51:43 +07:00
|
|
|
struct amdgpu_bo *bo = bo_va->base.bo;
|
2017-03-13 16:13:39 +07:00
|
|
|
uint64_t eaddr;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
/* validate the parameters */
|
|
|
|
if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
|
|
|
|
size == 0 || size & AMDGPU_GPU_PAGE_MASK)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* make sure object fit at this offset */
|
|
|
|
eaddr = saddr + size - 1;
|
|
|
|
if (saddr >= eaddr ||
|
2017-08-01 15:51:43 +07:00
|
|
|
(bo && offset + size > amdgpu_bo_size(bo)))
|
2017-03-13 16:13:39 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* Allocate all the needed memory */
|
|
|
|
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
|
|
|
|
if (!mapping)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2017-08-01 15:51:43 +07:00
|
|
|
r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
|
2017-03-13 16:13:39 +07:00
|
|
|
if (r) {
|
|
|
|
kfree(mapping);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
saddr /= AMDGPU_GPU_PAGE_SIZE;
|
|
|
|
eaddr /= AMDGPU_GPU_PAGE_SIZE;
|
|
|
|
|
2017-03-30 19:03:59 +07:00
|
|
|
mapping->start = saddr;
|
|
|
|
mapping->last = eaddr;
|
2017-03-13 16:13:39 +07:00
|
|
|
mapping->offset = offset;
|
|
|
|
mapping->flags = flags;
|
|
|
|
|
2017-08-16 16:13:48 +07:00
|
|
|
amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
|
2017-03-13 16:13:36 +07:00
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_unmap - remove bo mapping from vm
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @bo_va: bo_va to remove the address from
|
|
|
|
* @saddr: where the BO is mapped
|
|
|
|
*
|
|
|
|
* Remove a mapping of the BO at the specified addr from the VM.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, error for failure.
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
2015-11-13 13:18:38 +07:00
|
|
|
* Object has to be reserved and unreserved outside!
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
|
|
|
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_bo_va *bo_va,
|
|
|
|
uint64_t saddr)
|
|
|
|
{
|
|
|
|
struct amdgpu_bo_va_mapping *mapping;
|
2017-08-01 15:51:43 +07:00
|
|
|
struct amdgpu_vm *vm = bo_va->base.vm;
|
2015-07-30 16:53:42 +07:00
|
|
|
bool valid = true;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2015-06-06 01:56:17 +07:00
|
|
|
saddr /= AMDGPU_GPU_PAGE_SIZE;
|
2016-03-09 00:03:27 +07:00
|
|
|
|
2015-07-30 16:53:42 +07:00
|
|
|
list_for_each_entry(mapping, &bo_va->valids, list) {
|
2017-03-30 19:03:59 +07:00
|
|
|
if (mapping->start == saddr)
|
2015-04-21 03:55:21 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-07-30 16:53:42 +07:00
|
|
|
if (&mapping->list == &bo_va->valids) {
|
|
|
|
valid = false;
|
|
|
|
|
|
|
|
list_for_each_entry(mapping, &bo_va->invalids, list) {
|
2017-03-30 19:03:59 +07:00
|
|
|
if (mapping->start == saddr)
|
2015-07-30 16:53:42 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-03-09 00:03:27 +07:00
|
|
|
if (&mapping->list == &bo_va->invalids)
|
2015-07-30 16:53:42 +07:00
|
|
|
return -ENOENT;
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
2016-03-09 00:03:27 +07:00
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
list_del(&mapping->list);
|
2017-03-30 19:03:59 +07:00
|
|
|
amdgpu_vm_it_remove(mapping, &vm->va);
|
2017-09-06 21:55:16 +07:00
|
|
|
mapping->bo_va = NULL;
|
2015-06-09 21:58:33 +07:00
|
|
|
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2016-03-08 23:52:01 +07:00
|
|
|
if (valid)
|
2015-04-21 03:55:21 +07:00
|
|
|
list_add(&mapping->list, &vm->freed);
|
2016-03-08 23:52:01 +07:00
|
|
|
else
|
2017-01-30 17:09:31 +07:00
|
|
|
amdgpu_vm_free_mapping(adev, vm, mapping,
|
|
|
|
bo_va->last_pt_update);
|
2015-04-21 03:55:21 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
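/*
 * Illustrative counterpart to the mapping sketch above (helper name is an
 * assumption, locking simplified in the same way): tearing one mapping down
 * again. The address must be the same saddr that was passed to
 * amdgpu_vm_bo_map(), otherwise -ENOENT is returned.
 */
static int example_vm_unmap_bo(struct amdgpu_device *adev,
			       struct amdgpu_bo_va *bo_va, uint64_t gpu_addr)
{
	int r;

	r = amdgpu_bo_reserve(bo_va->base.bo, true);
	if (r)
		return r;

	r = amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr);
	amdgpu_bo_unreserve(bo_va->base.bo);
	return r;
}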
|
|
|
|
|
2017-03-13 16:13:38 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: VM structure to use
|
|
|
|
* @saddr: start of the range
|
|
|
|
* @size: size of the range
|
|
|
|
*
|
|
|
|
* Remove all mappings in a range, splitting them as appropriate.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, error for failure.
|
2017-03-13 16:13:38 +07:00
|
|
|
*/
|
|
|
|
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_vm *vm,
|
|
|
|
uint64_t saddr, uint64_t size)
|
|
|
|
{
|
|
|
|
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
|
|
|
|
LIST_HEAD(removed);
|
|
|
|
uint64_t eaddr;
|
|
|
|
|
|
|
|
eaddr = saddr + size - 1;
|
|
|
|
saddr /= AMDGPU_GPU_PAGE_SIZE;
|
|
|
|
eaddr /= AMDGPU_GPU_PAGE_SIZE;
|
|
|
|
|
|
|
|
/* Allocate all the needed memory */
|
|
|
|
before = kzalloc(sizeof(*before), GFP_KERNEL);
|
|
|
|
if (!before)
|
|
|
|
return -ENOMEM;
|
2017-03-16 15:09:24 +07:00
|
|
|
INIT_LIST_HEAD(&before->list);
|
2017-03-13 16:13:38 +07:00
|
|
|
|
|
|
|
after = kzalloc(sizeof(*after), GFP_KERNEL);
|
|
|
|
if (!after) {
|
|
|
|
kfree(before);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2017-03-16 15:09:24 +07:00
|
|
|
INIT_LIST_HEAD(&after->list);
|
2017-03-13 16:13:38 +07:00
|
|
|
|
|
|
|
/* Now gather all removed mappings */
|
2017-03-30 19:03:59 +07:00
|
|
|
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
|
|
|
|
while (tmp) {
|
2017-03-13 16:13:38 +07:00
|
|
|
/* Remember mapping split at the start */
|
2017-03-30 19:03:59 +07:00
|
|
|
if (tmp->start < saddr) {
|
|
|
|
before->start = tmp->start;
|
|
|
|
before->last = saddr - 1;
|
2017-03-13 16:13:38 +07:00
|
|
|
before->offset = tmp->offset;
|
|
|
|
before->flags = tmp->flags;
|
2018-06-05 16:31:51 +07:00
|
|
|
before->bo_va = tmp->bo_va;
|
|
|
|
list_add(&before->list, &tmp->bo_va->invalids);
|
2017-03-13 16:13:38 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Remember mapping split at the end */
|
2017-03-30 19:03:59 +07:00
|
|
|
if (tmp->last > eaddr) {
|
|
|
|
after->start = eaddr + 1;
|
|
|
|
after->last = tmp->last;
|
2017-03-13 16:13:38 +07:00
|
|
|
after->offset = tmp->offset;
|
2017-03-30 19:03:59 +07:00
|
|
|
after->offset += after->start - tmp->start;
|
2017-03-13 16:13:38 +07:00
|
|
|
after->flags = tmp->flags;
|
2018-06-05 16:31:51 +07:00
|
|
|
after->bo_va = tmp->bo_va;
|
|
|
|
list_add(&after->list, &tmp->bo_va->invalids);
|
2017-03-13 16:13:38 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
list_del(&tmp->list);
|
|
|
|
list_add(&tmp->list, &removed);
|
2017-03-30 19:03:59 +07:00
|
|
|
|
|
|
|
tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
|
2017-03-13 16:13:38 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* And free them up */
|
|
|
|
list_for_each_entry_safe(tmp, next, &removed, list) {
|
2017-03-30 19:03:59 +07:00
|
|
|
amdgpu_vm_it_remove(tmp, &vm->va);
|
2017-03-13 16:13:38 +07:00
|
|
|
list_del(&tmp->list);
|
|
|
|
|
2017-03-30 19:03:59 +07:00
|
|
|
if (tmp->start < saddr)
|
|
|
|
tmp->start = saddr;
|
|
|
|
if (tmp->last > eaddr)
|
|
|
|
tmp->last = eaddr;
|
2017-03-13 16:13:38 +07:00
|
|
|
|
2017-09-06 21:55:16 +07:00
|
|
|
tmp->bo_va = NULL;
|
2017-03-13 16:13:38 +07:00
|
|
|
list_add(&tmp->list, &vm->freed);
|
|
|
|
trace_amdgpu_vm_bo_unmap(NULL, tmp);
|
|
|
|
}
|
|
|
|
|
2017-03-16 15:09:24 +07:00
|
|
|
/* Insert partial mapping before the range */
|
|
|
|
if (!list_empty(&before->list)) {
|
2017-03-30 19:03:59 +07:00
|
|
|
amdgpu_vm_it_insert(before, &vm->va);
|
2017-03-13 16:13:38 +07:00
|
|
|
if (before->flags & AMDGPU_PTE_PRT)
|
|
|
|
amdgpu_vm_prt_get(adev);
|
|
|
|
} else {
|
|
|
|
kfree(before);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Insert partial mapping after the range */
|
2017-03-16 15:09:24 +07:00
|
|
|
if (!list_empty(&after->list)) {
|
2017-03-30 19:03:59 +07:00
|
|
|
amdgpu_vm_it_insert(after, &vm->va);
|
2017-03-13 16:13:38 +07:00
|
|
|
if (after->flags & AMDGPU_PTE_PRT)
|
|
|
|
amdgpu_vm_prt_get(adev);
|
|
|
|
} else {
|
|
|
|
kfree(after);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
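/*
 * Illustrative sketch (assumption: the VM page directory is reserved by the
 * caller, addresses are made up for the example): punching a hole into an
 * existing mapping. If a BO is mapped at 0x100000-0x17ffff, clearing
 * 0x120000 with a size of 0x20000 leaves 0x100000-0x11ffff and
 * 0x140000-0x17ffff mapped, with the offsets adjusted by the split logic
 * above.
 */
static int example_vm_punch_hole(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
	return amdgpu_vm_bo_clear_mappings(adev, vm, 0x120000, 0x20000);
}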
|
|
|
|
|
2017-09-06 21:55:16 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_lookup_mapping - find mapping by address
|
|
|
|
*
|
|
|
|
* @vm: the requested VM
|
2018-06-14 03:01:38 +07:00
|
|
|
* @addr: the address
|
2017-09-06 21:55:16 +07:00
|
|
|
*
|
|
|
|
* Find a mapping by its address.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* The amdgpu_bo_va_mapping matching addr, or NULL if none is found
|
|
|
|
*
|
2017-09-06 21:55:16 +07:00
|
|
|
*/
|
|
|
|
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
|
|
|
|
uint64_t addr)
|
|
|
|
{
|
|
|
|
return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
|
|
|
|
}
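/*
 * Illustrative sketch (helper name is an assumption): translating a GPU
 * virtual address into the backing BO. The interval tree stores addresses
 * in GPU-page units, so the byte address is converted first, mirroring what
 * the command submission code does before calling the lookup.
 */
static struct amdgpu_bo *example_vm_addr_to_bo(struct amdgpu_vm *vm,
					       uint64_t byte_addr)
{
	struct amdgpu_bo_va_mapping *mapping;

	mapping = amdgpu_vm_bo_lookup_mapping(vm,
					      byte_addr / AMDGPU_GPU_PAGE_SIZE);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return NULL;

	return mapping->bo_va->base.bo;
}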
|
|
|
|
|
2018-07-27 21:56:34 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_trace_cs - trace all reserved mappings
|
|
|
|
*
|
|
|
|
* @vm: the requested vm
|
|
|
|
* @ticket: CS ticket
|
|
|
|
*
|
|
|
|
* Trace all mappings of BOs reserved during a command submission.
|
|
|
|
*/
|
|
|
|
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
|
|
|
|
{
|
|
|
|
struct amdgpu_bo_va_mapping *mapping;
|
|
|
|
|
|
|
|
if (!trace_amdgpu_vm_bo_cs_enabled())
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
|
|
|
|
mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
|
|
|
|
if (mapping->bo_va && mapping->bo_va->base.bo) {
|
|
|
|
struct amdgpu_bo *bo;
|
|
|
|
|
|
|
|
bo = mapping->bo_va->base.bo;
|
|
|
|
if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_amdgpu_vm_bo_cs(mapping);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_rmv - remove a bo from a specific vm
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @bo_va: requested bo_va
|
|
|
|
*
|
2016-01-26 18:17:11 +07:00
|
|
|
* Remove @bo_va->bo from the requested vm.
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
|
|
|
* Object has to be reserved!
|
|
|
|
*/
|
|
|
|
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
|
|
|
|
struct amdgpu_bo_va *bo_va)
|
|
|
|
{
|
|
|
|
struct amdgpu_bo_va_mapping *mapping, *next;
|
2018-09-05 22:04:44 +07:00
|
|
|
struct amdgpu_bo *bo = bo_va->base.bo;
|
2017-08-01 15:51:43 +07:00
|
|
|
struct amdgpu_vm *vm = bo_va->base.vm;
|
2018-09-11 01:02:46 +07:00
|
|
|
struct amdgpu_vm_bo_base **base;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2018-09-11 01:02:46 +07:00
|
|
|
if (bo) {
|
|
|
|
if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
|
|
|
|
vm->bulk_moveable = false;
|
2018-09-05 22:04:44 +07:00
|
|
|
|
2018-09-11 01:02:46 +07:00
|
|
|
for (base = &bo_va->base.bo->vm_bo; *base;
|
|
|
|
base = &(*base)->next) {
|
|
|
|
if (*base != &bo_va->base)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
*base = bo_va->base.next;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2018-09-01 18:25:31 +07:00
|
|
|
spin_lock(&vm->invalidated_lock);
|
2017-08-01 15:51:43 +07:00
|
|
|
list_del(&bo_va->base.vm_status);
|
2018-09-01 18:25:31 +07:00
|
|
|
spin_unlock(&vm->invalidated_lock);
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2015-07-30 16:53:42 +07:00
|
|
|
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
|
2015-04-21 03:55:21 +07:00
|
|
|
list_del(&mapping->list);
|
2017-03-30 19:03:59 +07:00
|
|
|
amdgpu_vm_it_remove(mapping, &vm->va);
|
2017-09-06 21:55:16 +07:00
|
|
|
mapping->bo_va = NULL;
|
2015-06-09 21:58:33 +07:00
|
|
|
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
|
2015-07-30 16:53:42 +07:00
|
|
|
list_add(&mapping->list, &vm->freed);
|
|
|
|
}
|
|
|
|
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
|
|
|
|
list_del(&mapping->list);
|
2017-03-30 19:03:59 +07:00
|
|
|
amdgpu_vm_it_remove(mapping, &vm->va);
|
2017-01-30 17:09:31 +07:00
|
|
|
amdgpu_vm_free_mapping(adev, vm, mapping,
|
|
|
|
bo_va->last_pt_update);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
2016-03-09 00:03:27 +07:00
|
|
|
|
2016-10-25 19:00:45 +07:00
|
|
|
dma_fence_put(bo_va->last_pt_update);
|
2015-04-21 03:55:21 +07:00
|
|
|
kfree(bo_va);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_bo_invalidate - mark the bo as invalid
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @bo: amdgpu buffer object
|
2018-06-14 03:01:38 +07:00
|
|
|
* @evicted: whether the BO is evicted
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
2016-01-26 18:17:11 +07:00
|
|
|
* Mark @bo as invalid.
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
|
|
|
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
|
2017-08-03 19:02:13 +07:00
|
|
|
struct amdgpu_bo *bo, bool evicted)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
2017-08-01 15:51:43 +07:00
|
|
|
struct amdgpu_vm_bo_base *bo_base;
|
|
|
|
|
2018-04-24 12:54:10 +07:00
|
|
|
/* shadow bo doesn't have bo base, its validation needs its parent */
|
|
|
|
if (bo->parent && bo->parent->shadow == bo)
|
|
|
|
bo = bo->parent;
|
|
|
|
|
2018-09-11 01:02:46 +07:00
|
|
|
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
|
2017-08-03 19:02:13 +07:00
|
|
|
struct amdgpu_vm *vm = bo_base->vm;
|
|
|
|
|
|
|
|
if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
|
2018-08-30 15:27:15 +07:00
|
|
|
amdgpu_vm_bo_evicted(bo_base);
|
2017-08-03 19:02:13 +07:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-08-30 15:27:15 +07:00
|
|
|
if (bo_base->moved)
|
2017-08-03 19:02:13 +07:00
|
|
|
continue;
|
2018-08-30 15:27:15 +07:00
|
|
|
bo_base->moved = true;
|
2017-08-03 19:02:13 +07:00
|
|
|
|
2018-08-30 15:27:15 +07:00
|
|
|
if (bo->tbo.type == ttm_bo_type_kernel)
|
|
|
|
amdgpu_vm_bo_relocated(bo_base);
|
|
|
|
else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
|
|
|
|
amdgpu_vm_bo_moved(bo_base);
|
|
|
|
else
|
|
|
|
amdgpu_vm_bo_invalidated(bo_base);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-11 22:11:24 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_get_block_size - calculate VM page table size as a power of two
|
|
|
|
*
|
|
|
|
* @vm_size: VM size
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* VM page table size as a power of two
|
|
|
|
*/
|
2017-04-05 12:54:56 +07:00
|
|
|
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
|
|
|
|
{
|
|
|
|
/* Total bits covered by PD + PTs */
|
|
|
|
unsigned bits = ilog2(vm_size) + 18;
|
|
|
|
|
|
|
|
/* Make sure the PD is 4K in size up to 8GB address space.
|
|
|
|
Above that, split equally between PD and PTs */
|
|
|
|
if (vm_size <= 8)
|
|
|
|
return (bits - 9);
|
|
|
|
else
|
|
|
|
return ((bits + 3) / 2);
|
|
|
|
}
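/*
 * Worked example for the formula above (numbers only, not driver code):
 * for vm_size = 8 GB, bits = ilog2(8) + 18 = 21 and the result is
 * 21 - 9 = 12, which keeps the page directory at 9 bits, i.e. 512 entries
 * of 8 bytes = 4K. For vm_size = 256 GB, bits = ilog2(256) + 18 = 26 and
 * the result is (26 + 3) / 2 = 14, splitting the covered bits roughly
 * equally between page directory and page tables.
 */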
|
|
|
|
|
2017-08-15 15:05:59 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
|
2017-04-05 12:54:56 +07:00
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
2018-08-22 04:14:32 +07:00
|
|
|
* @min_vm_size: the minimum VM size in GB if it is set to auto
|
2018-06-14 03:01:38 +07:00
|
|
|
* @fragment_size_default: Default PTE fragment size
|
|
|
|
* @max_level: max VMPT level
|
|
|
|
* @max_bits: max address space size in bits
|
|
|
|
*
|
2017-04-05 12:54:56 +07:00
|
|
|
*/
|
2018-08-22 04:14:32 +07:00
|
|
|
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
|
2017-11-23 18:57:18 +07:00
|
|
|
uint32_t fragment_size_default, unsigned max_level,
|
|
|
|
unsigned max_bits)
|
2017-04-05 12:54:56 +07:00
|
|
|
{
|
2018-08-22 04:14:32 +07:00
|
|
|
unsigned int max_size = 1 << (max_bits - 30);
|
|
|
|
unsigned int vm_size;
|
2017-11-23 17:16:05 +07:00
|
|
|
uint64_t tmp;
|
|
|
|
|
|
|
|
/* adjust vm size first */
|
2017-11-23 18:57:18 +07:00
|
|
|
if (amdgpu_vm_size != -1) {
|
2017-11-04 22:51:44 +07:00
|
|
|
vm_size = amdgpu_vm_size;
|
2017-11-23 18:57:18 +07:00
|
|
|
if (vm_size > max_size) {
|
|
|
|
dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
|
|
|
|
amdgpu_vm_size, max_size);
|
|
|
|
vm_size = max_size;
|
|
|
|
}
|
2018-08-22 04:14:32 +07:00
|
|
|
} else {
|
|
|
|
struct sysinfo si;
|
|
|
|
unsigned int phys_ram_gb;
|
|
|
|
|
|
|
|
/* Optimal VM size depends on the amount of physical
|
|
|
|
* RAM available. Underlying requirements and
|
|
|
|
* assumptions:
|
|
|
|
*
|
|
|
|
* - Need to map system memory and VRAM from all GPUs
|
|
|
|
* - VRAM from other GPUs not known here
|
|
|
|
* - Assume VRAM <= system memory
|
|
|
|
* - On GFX8 and older, VM space can be segmented for
|
|
|
|
* different MTYPEs
|
|
|
|
* - Need to allow room for fragmentation, guard pages etc.
|
|
|
|
*
|
|
|
|
* This adds up to a rough guess of system memory x3.
|
|
|
|
* Round up to power of two to maximize the available
|
|
|
|
* VM size with the given page table size.
|
|
|
|
*/
|
|
|
|
si_meminfo(&si);
|
|
|
|
phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
|
|
|
|
(1 << 30) - 1) >> 30;
|
|
|
|
vm_size = roundup_pow_of_two(
|
|
|
|
min(max(phys_ram_gb * 3, min_vm_size), max_size));
|
2017-11-23 18:57:18 +07:00
|
|
|
}
|
2017-11-04 22:51:44 +07:00
|
|
|
|
|
|
|
adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
|
2017-11-23 17:16:05 +07:00
|
|
|
|
|
|
|
tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
|
2017-11-27 22:22:05 +07:00
|
|
|
if (amdgpu_vm_block_size != -1)
|
|
|
|
tmp >>= amdgpu_vm_block_size - 9;
|
2017-11-23 17:16:05 +07:00
|
|
|
tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
|
|
|
|
adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
|
2017-12-13 13:22:54 +07:00
|
|
|
switch (adev->vm_manager.num_level) {
|
|
|
|
case 3:
|
|
|
|
adev->vm_manager.root_level = AMDGPU_VM_PDB2;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
adev->vm_manager.root_level = AMDGPU_VM_PDB1;
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
adev->vm_manager.root_level = AMDGPU_VM_PDB0;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
|
|
|
|
}
|
2017-11-22 23:00:35 +07:00
|
|
|
/* block size depends on vm size and hw setup */
|
2017-11-27 22:22:05 +07:00
|
|
|
if (amdgpu_vm_block_size != -1)
|
2017-04-05 12:54:56 +07:00
|
|
|
adev->vm_manager.block_size =
|
2017-11-27 22:22:05 +07:00
|
|
|
min((unsigned)amdgpu_vm_block_size, max_bits
|
|
|
|
- AMDGPU_GPU_PAGE_SHIFT
|
|
|
|
- 9 * adev->vm_manager.num_level);
|
|
|
|
else if (adev->vm_manager.num_level > 1)
|
|
|
|
adev->vm_manager.block_size = 9;
|
2017-04-05 12:54:56 +07:00
|
|
|
else
|
2017-11-27 22:22:05 +07:00
|
|
|
adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
|
2017-04-05 12:54:56 +07:00
|
|
|
|
2017-11-22 23:00:35 +07:00
|
|
|
if (amdgpu_vm_fragment_size == -1)
|
|
|
|
adev->vm_manager.fragment_size = fragment_size_default;
|
|
|
|
else
|
|
|
|
adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
|
2017-08-15 15:05:59 +07:00
|
|
|
|
2017-11-23 17:16:05 +07:00
|
|
|
DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
|
|
|
|
vm_size, adev->vm_manager.num_level + 1,
|
|
|
|
adev->vm_manager.block_size,
|
2017-11-04 22:51:44 +07:00
|
|
|
adev->vm_manager.fragment_size);
|
2017-04-05 12:54:56 +07:00
|
|
|
}
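/*
 * Worked example for the sizing above (numbers only, not driver code):
 * with vm_size = 256 GB, max_pfn = 256 << 18 = 0x4000000 GPU pages, since
 * one GB holds 1 << 18 pages of AMDGPU_GPU_PAGE_SIZE (4096) bytes. With the
 * default block size, tmp = DIV_ROUND_UP(fls64(0x4000000) - 1, 9) - 1 = 2,
 * so num_level becomes min(max_level, 2) and the root level is then chosen
 * from it in the switch statement above.
 */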
|
|
|
|
|
2018-09-06 10:51:23 +07:00
|
|
|
static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
|
|
|
|
{
|
|
|
|
struct amdgpu_retryfault_hashtable *fault_hash;
|
|
|
|
|
|
|
|
fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
|
|
|
|
if (!fault_hash)
|
|
|
|
return fault_hash;
|
|
|
|
|
|
|
|
INIT_CHASH_TABLE(fault_hash->hash,
|
|
|
|
AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
|
|
|
|
spin_lock_init(&fault_hash->lock);
|
|
|
|
fault_hash->count = 0;
|
|
|
|
|
|
|
|
return fault_hash;
|
|
|
|
}
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_init - initialize a vm instance
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: requested vm
|
2017-06-09 22:26:57 +07:00
|
|
|
* @vm_context: Indicates whether it is a GFX or Compute context
|
2018-06-14 03:01:38 +07:00
|
|
|
* @pasid: Process address space identifier
|
2015-04-21 03:55:21 +07:00
|
|
|
*
|
2016-01-26 18:17:11 +07:00
|
|
|
* Init @vm fields.
|
2018-06-11 22:11:24 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, error for failure.
|
2015-04-21 03:55:21 +07:00
|
|
|
*/
|
2017-06-09 22:26:57 +07:00
|
|
|
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
2017-08-26 07:40:26 +07:00
|
|
|
int vm_context, unsigned int pasid)
|
2015-04-21 03:55:21 +07:00
|
|
|
{
|
2018-04-16 17:27:50 +07:00
|
|
|
struct amdgpu_bo_param bp;
|
2018-04-24 11:14:39 +07:00
|
|
|
struct amdgpu_bo *root;
|
2017-04-20 15:17:34 +07:00
|
|
|
int r, i;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-09-09 06:15:08 +07:00
|
|
|
vm->va = RB_ROOT_CACHED;
|
2017-04-20 15:17:34 +07:00
|
|
|
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
|
|
|
|
vm->reserved_vmid[i] = NULL;
|
2017-08-03 19:02:13 +07:00
|
|
|
INIT_LIST_HEAD(&vm->evicted);
|
2017-08-09 19:15:46 +07:00
|
|
|
INIT_LIST_HEAD(&vm->relocated);
|
2017-08-01 16:27:36 +07:00
|
|
|
INIT_LIST_HEAD(&vm->moved);
|
2018-04-19 20:01:12 +07:00
|
|
|
INIT_LIST_HEAD(&vm->idle);
|
2018-09-01 18:25:31 +07:00
|
|
|
INIT_LIST_HEAD(&vm->invalidated);
|
|
|
|
spin_lock_init(&vm->invalidated_lock);
|
2015-04-21 03:55:21 +07:00
|
|
|
INIT_LIST_HEAD(&vm->freed);
|
2016-03-08 23:58:35 +07:00
|
|
|
|
2016-02-01 18:53:58 +07:00
|
|
|
/* create scheduler entity for page table updates */
|
2018-07-12 20:15:21 +07:00
|
|
|
r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
|
|
|
|
adev->vm_manager.vm_pte_num_rqs, NULL);
|
2016-02-01 18:53:58 +07:00
|
|
|
if (r)
|
2016-10-28 01:04:38 +07:00
|
|
|
return r;
|
2016-02-01 18:53:58 +07:00
|
|
|
|
2017-07-27 23:48:22 +07:00
|
|
|
vm->pte_support_ats = false;
|
|
|
|
|
|
|
|
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
|
2017-06-09 22:26:57 +07:00
|
|
|
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
|
|
|
|
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
|
2017-07-27 23:48:22 +07:00
|
|
|
|
2018-09-14 03:41:57 +07:00
|
|
|
if (adev->asic_type == CHIP_RAVEN)
|
2017-07-27 23:48:22 +07:00
|
|
|
vm->pte_support_ats = true;
|
2018-01-24 23:19:04 +07:00
|
|
|
} else {
|
2017-06-09 22:26:57 +07:00
|
|
|
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
|
|
|
|
AMDGPU_VM_USE_CPU_FOR_GFX);
|
2018-01-24 23:19:04 +07:00
|
|
|
}
|
2017-06-09 22:26:57 +07:00
|
|
|
DRM_DEBUG_DRIVER("VM update mode is %s\n",
|
|
|
|
vm->use_cpu_for_update ? "CPU" : "SDMA");
|
2018-06-13 01:28:20 +07:00
|
|
|
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
|
2017-06-09 22:26:57 +07:00
|
|
|
"CPU update of VM recommended only for large BAR system\n");
|
2017-09-08 19:09:41 +07:00
|
|
|
vm->last_update = NULL;
|
2015-08-15 01:08:40 +07:00
|
|
|
|
2018-08-28 03:17:59 +07:00
|
|
|
amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
|
2018-09-06 07:19:54 +07:00
|
|
|
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
|
|
|
|
bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
|
2018-04-24 11:14:39 +07:00
|
|
|
r = amdgpu_bo_create(adev, &bp, &root);
|
2015-04-21 03:55:21 +07:00
|
|
|
if (r)
|
2016-02-01 18:53:58 +07:00
|
|
|
goto error_free_sched_entity;
|
|
|
|
|
2018-04-24 11:14:39 +07:00
|
|
|
r = amdgpu_bo_reserve(root, true);
|
2018-01-24 20:57:02 +07:00
|
|
|
if (r)
|
|
|
|
goto error_free_root;
|
|
|
|
|
2018-09-21 23:09:59 +07:00
|
|
|
r = reservation_object_reserve_shared(root->tbo.resv, 1);
|
|
|
|
if (r)
|
|
|
|
goto error_unreserve;
|
|
|
|
|
2018-04-24 11:14:39 +07:00
|
|
|
r = amdgpu_vm_clear_bo(adev, vm, root,
|
2018-01-26 00:36:15 +07:00
|
|
|
adev->vm_manager.root_level,
|
|
|
|
vm->pte_support_ats);
|
2018-01-24 23:19:04 +07:00
|
|
|
if (r)
|
|
|
|
goto error_unreserve;
|
|
|
|
|
2018-04-24 11:14:39 +07:00
|
|
|
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
|
2018-01-24 20:57:02 +07:00
|
|
|
amdgpu_bo_unreserve(vm->root.base.bo);
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2017-08-26 07:40:26 +07:00
|
|
|
if (pasid) {
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
|
|
|
|
GFP_ATOMIC);
|
|
|
|
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
if (r < 0)
|
|
|
|
goto error_free_root;
|
|
|
|
|
|
|
|
vm->pasid = pasid;
|
2017-07-12 15:01:48 +07:00
|
|
|
}
|
|
|
|
|
2018-09-06 10:51:23 +07:00
|
|
|
vm->fault_hash = init_fault_hash();
|
|
|
|
if (!vm->fault_hash) {
|
|
|
|
r = -ENOMEM;
|
|
|
|
goto error_free_root;
|
|
|
|
}
|
|
|
|
|
2017-08-26 13:43:06 +07:00
|
|
|
INIT_KFIFO(vm->faults);
|
2017-09-22 03:26:41 +07:00
|
|
|
vm->fault_credit = 16;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
|
|
|
return 0;
|
2016-02-01 18:53:58 +07:00
|
|
|
|
2018-01-24 23:19:04 +07:00
|
|
|
error_unreserve:
|
|
|
|
amdgpu_bo_unreserve(vm->root.base.bo);
|
|
|
|
|
2016-10-12 19:46:26 +07:00
|
|
|
error_free_root:
|
2017-08-03 19:02:13 +07:00
|
|
|
amdgpu_bo_unref(&vm->root.base.bo->shadow);
|
|
|
|
amdgpu_bo_unref(&vm->root.base.bo);
|
|
|
|
vm->root.base.bo = NULL;
|
2016-02-01 18:53:58 +07:00
|
|
|
|
|
|
|
error_free_sched_entity:
|
2018-07-20 19:21:05 +07:00
|
|
|
drm_sched_entity_destroy(&vm->entity);
|
2016-02-01 18:53:58 +07:00
|
|
|
|
|
|
|
return r;
|
2015-04-21 03:55:21 +07:00
|
|
|
}
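/*
 * Illustrative lifecycle sketch, not part of the driver: creating and
 * tearing down a GFX VM. Error handling is trimmed and the VM is a stack
 * variable only to keep the sketch short; in the real driver it is embedded
 * in the file private data. Passing 0 as pasid simply skips the PASID
 * registration done above.
 */
static int example_vm_lifecycle(struct amdgpu_device *adev)
{
	struct amdgpu_vm vm;
	int r;

	r = amdgpu_vm_init(adev, &vm, AMDGPU_VM_CONTEXT_GFX, 0);
	if (r)
		return r;

	/* ... create bo_vas, map BOs, submit work ... */

	amdgpu_vm_fini(adev, &vm);
	return 0;
}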
|
|
|
|
|
2018-03-16 04:27:42 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
|
|
|
|
*
|
2018-06-11 22:11:24 +07:00
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: requested vm
|
|
|
|
*
|
2018-03-16 04:27:42 +07:00
|
|
|
* This only works on GFX VMs that don't have any BOs added and no
|
|
|
|
* page tables allocated yet.
|
|
|
|
*
|
|
|
|
* Changes the following VM parameters:
|
|
|
|
* - use_cpu_for_update
|
|
|
|
* - pte_supports_ats
|
|
|
|
* - pasid (old PASID is released, because compute manages its own PASIDs)
|
|
|
|
*
|
|
|
|
* Reinitializes the page directory to reflect the changed ATS
|
2018-07-12 09:32:59 +07:00
|
|
|
* setting.
|
2018-03-16 04:27:42 +07:00
|
|
|
*
|
2018-06-11 22:11:24 +07:00
|
|
|
* Returns:
|
|
|
|
* 0 for success, -errno for errors.
|
2018-03-16 04:27:42 +07:00
|
|
|
*/
|
2018-08-30 00:33:52 +07:00
|
|
|
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
|
2018-03-16 04:27:42 +07:00
|
|
|
{
|
2018-09-14 03:41:57 +07:00
|
|
|
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
|
2018-03-16 04:27:42 +07:00
|
|
|
int r;
|
|
|
|
|
|
|
|
r = amdgpu_bo_reserve(vm->root.base.bo, true);
|
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
|
|
|
|
/* Sanity checks */
|
|
|
|
if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
|
|
|
|
r = -EINVAL;
|
2018-08-30 00:33:52 +07:00
|
|
|
goto unreserve_bo;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pasid) {
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
|
|
|
|
GFP_ATOMIC);
|
|
|
|
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
|
|
|
|
if (r == -ENOSPC)
|
|
|
|
goto unreserve_bo;
|
|
|
|
r = 0;
|
2018-03-16 04:27:42 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if PD needs to be reinitialized and do it before
|
|
|
|
* changing any other state, in case it fails.
|
|
|
|
*/
|
|
|
|
if (pte_support_ats != vm->pte_support_ats) {
|
|
|
|
r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
|
|
|
|
adev->vm_manager.root_level,
|
|
|
|
pte_support_ats);
|
|
|
|
if (r)
|
2018-08-30 00:33:52 +07:00
|
|
|
goto free_idr;
|
2018-03-16 04:27:42 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Update VM state */
|
|
|
|
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
|
|
|
|
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
|
|
|
|
vm->pte_support_ats = pte_support_ats;
|
|
|
|
DRM_DEBUG_DRIVER("VM update mode is %s\n",
|
|
|
|
vm->use_cpu_for_update ? "CPU" : "SDMA");
|
2018-06-13 01:28:20 +07:00
|
|
|
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
|
2018-03-16 04:27:42 +07:00
|
|
|
"CPU update of VM recommended only for large BAR system\n");
|
|
|
|
|
|
|
|
if (vm->pasid) {
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
|
|
|
|
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
|
2018-08-30 00:33:52 +07:00
|
|
|
/* Free the original amdgpu allocated pasid
|
|
|
|
* Will be replaced with kfd allocated pasid
|
|
|
|
*/
|
|
|
|
amdgpu_pasid_free(vm->pasid);
|
2018-03-16 04:27:42 +07:00
|
|
|
vm->pasid = 0;
|
|
|
|
}
|
|
|
|
|
2018-07-12 09:32:59 +07:00
|
|
|
/* Free the shadow bo for compute VM */
|
|
|
|
amdgpu_bo_unref(&vm->root.base.bo->shadow);
|
|
|
|
|
2018-08-30 00:33:52 +07:00
|
|
|
if (pasid)
|
|
|
|
vm->pasid = pasid;
|
|
|
|
|
|
|
|
goto unreserve_bo;
|
|
|
|
|
|
|
|
free_idr:
|
|
|
|
if (pasid) {
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
idr_remove(&adev->vm_manager.pasid_idr, pasid);
|
|
|
|
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
}
|
|
|
|
unreserve_bo:
|
2018-03-16 04:27:42 +07:00
|
|
|
amdgpu_bo_unreserve(vm->root.base.bo);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2018-08-28 02:18:36 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_release_compute - release a compute vm
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
|
|
|
|
*
|
|
|
|
* This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
|
|
|
|
* pasid from the vm. Compute should stop using the vm after this call.
|
|
|
|
*/
|
|
|
|
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
|
|
|
{
|
|
|
|
if (vm->pasid) {
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
|
|
|
|
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
}
|
|
|
|
vm->pasid = 0;
|
|
|
|
}
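/*
 * Illustrative sketch (assumption: the pasid comes from the KFD side and the
 * VM is a fresh GFX VM without mappings): handing a VM over to compute and
 * giving it back, mirroring how the amdkfd GPUVM code uses the two functions
 * above.
 */
static int example_vm_to_compute(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm, unsigned int pasid)
{
	int r;

	r = amdgpu_vm_make_compute(adev, vm, pasid);
	if (r)
		return r;

	/* ... compute workloads run on the VM ... */

	amdgpu_vm_release_compute(adev, vm);
	return 0;
}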
|
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_fini - tear down a vm instance
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @vm: requested vm
|
|
|
|
*
|
2016-01-26 18:17:11 +07:00
|
|
|
* Tear down @vm.
|
2015-04-21 03:55:21 +07:00
|
|
|
* Unbind the VM and remove all bos from the vm bo list
|
|
|
|
*/
|
|
|
|
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
|
|
|
{
|
|
|
|
struct amdgpu_bo_va_mapping *mapping, *tmp;
|
2018-01-12 21:26:08 +07:00
|
|
|
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
|
2017-10-13 22:24:31 +07:00
|
|
|
struct amdgpu_bo *root;
|
2017-08-26 13:43:06 +07:00
|
|
|
u64 fault;
|
2017-10-13 22:24:31 +07:00
|
|
|
int i, r;
|
2015-04-21 03:55:21 +07:00
|
|
|
|
2018-03-16 04:27:43 +07:00
|
|
|
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
|
|
|
|
|
2017-08-26 13:43:06 +07:00
|
|
|
/* Clear pending page faults from IH when the VM is destroyed */
|
|
|
|
while (kfifo_get(&vm->faults, &fault))
|
2018-09-06 10:51:23 +07:00
|
|
|
amdgpu_vm_clear_fault(vm->fault_hash, fault);
|
2017-08-26 13:43:06 +07:00
|
|
|
|
2017-08-26 07:40:26 +07:00
|
|
|
if (vm->pasid) {
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
|
|
|
|
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
|
|
|
}
|
|
|
|
|
2018-09-06 10:51:23 +07:00
|
|
|
kfree(vm->fault_hash);
|
|
|
|
vm->fault_hash = NULL;
|
|
|
|
|
2018-07-20 19:21:05 +07:00
|
|
|
drm_sched_entity_destroy(&vm->entity);
|
2016-02-01 18:53:58 +07:00
|
|
|
|
2017-09-09 06:15:08 +07:00
|
|
|
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
|
2015-04-21 03:55:21 +07:00
|
|
|
dev_err(adev->dev, "still active bo inside vm\n");
|
|
|
|
}
|
2017-09-09 06:15:08 +07:00
|
|
|
rbtree_postorder_for_each_entry_safe(mapping, tmp,
|
|
|
|
&vm->va.rb_root, rb) {
|
2018-10-18 19:29:28 +07:00
|
|
|
/* Don't remove the mapping here, we don't want to trigger a
|
|
|
|
* rebalance and the tree is about to be destroyed anyway.
|
|
|
|
*/
|
2015-04-21 03:55:21 +07:00
|
|
|
list_del(&mapping->list);
|
|
|
|
kfree(mapping);
|
|
|
|
}
|
|
|
|
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
|
2017-03-13 16:13:36 +07:00
|
|
|
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
|
2017-02-14 22:02:52 +07:00
|
|
|
amdgpu_vm_prt_fini(adev, vm);
|
2017-03-13 16:13:36 +07:00
|
|
|
prt_fini_needed = false;
|
2017-02-14 22:02:52 +07:00
|
|
|
}
|
2017-01-30 17:09:31 +07:00
|
|
|
|
2015-04-21 03:55:21 +07:00
|
|
|
list_del(&mapping->list);
|
2017-02-14 22:02:52 +07:00
|
|
|
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
|
|
|
|
2017-10-13 22:24:31 +07:00
|
|
|
root = amdgpu_bo_ref(vm->root.base.bo);
|
|
|
|
r = amdgpu_bo_reserve(root, true);
|
|
|
|
if (r) {
|
|
|
|
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
|
|
|
|
} else {
|
2018-09-06 20:35:13 +07:00
|
|
|
amdgpu_vm_free_pts(adev, vm);
|
2017-10-13 22:24:31 +07:00
|
|
|
amdgpu_bo_unreserve(root);
|
|
|
|
}
|
|
|
|
amdgpu_bo_unref(&root);
|
2017-09-08 19:09:41 +07:00
|
|
|
dma_fence_put(vm->last_update);
|
2017-04-20 15:18:48 +07:00
|
|
|
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
|
2017-12-18 22:53:03 +07:00
|
|
|
amdgpu_vmid_free_reserved(adev, vm, i);
|
2015-04-21 03:55:21 +07:00
|
|
|
}
|
2015-11-16 02:52:06 +07:00
|
|
|
|
2017-09-22 03:26:41 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
* @pasid: PASID do identify the VM
|
|
|
|
*
|
2018-06-11 22:11:24 +07:00
|
|
|
* This function is expected to be called in interrupt context.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* True if there was fault credit, false otherwise
|
2017-09-22 03:26:41 +07:00
|
|
|
*/
|
|
|
|
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
|
|
|
|
unsigned int pasid)
|
|
|
|
{
|
|
|
|
struct amdgpu_vm *vm;
|
|
|
|
|
|
|
|
spin_lock(&adev->vm_manager.pasid_lock);
|
|
|
|
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
|
2018-01-10 01:18:59 +07:00
|
|
|
if (!vm) {
|
2017-09-22 03:26:41 +07:00
|
|
|
/* VM not found, can't track fault credit */
|
2018-01-10 01:18:59 +07:00
|
|
|
spin_unlock(&adev->vm_manager.pasid_lock);
|
2017-09-22 03:26:41 +07:00
|
|
|
return true;
|
2018-01-10 01:18:59 +07:00
|
|
|
}
|
2017-09-22 03:26:41 +07:00
|
|
|
|
|
|
|
/* No lock needed. only accessed by IRQ handler */
|
2018-01-10 01:18:59 +07:00
|
|
|
if (!vm->fault_credit) {
|
2017-09-22 03:26:41 +07:00
|
|
|
/* Too many faults in this VM */
|
2018-01-10 01:18:59 +07:00
|
|
|
spin_unlock(&adev->vm_manager.pasid_lock);
|
2017-09-22 03:26:41 +07:00
|
|
|
return false;
|
2018-01-10 01:18:59 +07:00
|
|
|
}
|
2017-09-22 03:26:41 +07:00
|
|
|
|
|
|
|
vm->fault_credit--;
|
2018-01-10 01:18:59 +07:00
|
|
|
spin_unlock(&adev->vm_manager.pasid_lock);
|
2017-09-22 03:26:41 +07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-01-21 16:19:11 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_manager_init - init the VM manager
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
*
|
|
|
|
* Initialize the VM manager structures
|
|
|
|
*/
|
|
|
|
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
|
|
|
|
{
|
2017-12-18 22:53:03 +07:00
|
|
|
unsigned i;
|
2016-01-21 16:19:11 +07:00
|
|
|
|
2017-12-18 22:53:03 +07:00
|
|
|
amdgpu_vmid_mgr_init(adev);
|
2016-02-08 23:37:38 +07:00
|
|
|
|
2016-10-25 19:00:45 +07:00
|
|
|
adev->vm_manager.fence_context =
|
|
|
|
dma_fence_context_alloc(AMDGPU_MAX_RINGS);
|
2016-06-01 15:47:36 +07:00
|
|
|
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
|
|
|
|
adev->vm_manager.seqno[i] = 0;
|
|
|
|
|
2017-01-30 17:09:31 +07:00
|
|
|
spin_lock_init(&adev->vm_manager.prt_lock);
|
2017-02-14 22:02:52 +07:00
|
|
|
atomic_set(&adev->vm_manager.num_prt_users, 0);
|
2017-06-09 22:26:57 +07:00
|
|
|
|
|
|
|
/* If not overridden by the user, by default, only in large BAR systems
|
|
|
|
* Compute VM tables will be updated by CPU
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
if (amdgpu_vm_update_mode == -1) {
|
2018-06-13 01:28:20 +07:00
|
|
|
if (amdgpu_gmc_vram_full_visible(&adev->gmc))
|
2017-06-09 22:26:57 +07:00
|
|
|
adev->vm_manager.vm_update_mode =
|
|
|
|
AMDGPU_VM_USE_CPU_FOR_COMPUTE;
|
|
|
|
else
|
|
|
|
adev->vm_manager.vm_update_mode = 0;
|
|
|
|
} else
|
|
|
|
adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
|
|
|
|
#else
|
|
|
|
adev->vm_manager.vm_update_mode = 0;
|
|
|
|
#endif
|
|
|
|
|
2017-08-26 07:40:26 +07:00
|
|
|
idr_init(&adev->vm_manager.pasid_idr);
|
|
|
|
spin_lock_init(&adev->vm_manager.pasid_lock);
|
2016-01-21 16:19:11 +07:00
|
|
|
}
|
|
|
|
|
2015-11-16 02:52:06 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_manager_fini - cleanup VM manager
|
|
|
|
*
|
|
|
|
* @adev: amdgpu_device pointer
|
|
|
|
*
|
|
|
|
* Cleanup the VM manager and free resources.
|
|
|
|
*/
|
|
|
|
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
|
|
|
|
{
|
2017-08-26 07:40:26 +07:00
|
|
|
WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
|
|
|
|
idr_destroy(&adev->vm_manager.pasid_idr);
|
|
|
|
|
2017-12-18 22:53:03 +07:00
|
|
|
amdgpu_vmid_mgr_fini(adev);
|
2015-11-16 02:52:06 +07:00
|
|
|
}
|
2017-04-24 10:09:04 +07:00
|
|
|
|
2018-06-11 22:11:24 +07:00
|
|
|
/**
|
|
|
|
* amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
|
|
|
|
*
|
|
|
|
* @dev: drm device pointer
|
|
|
|
* @data: drm_amdgpu_vm
|
|
|
|
* @filp: drm file pointer
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 for success, -errno for errors.
|
|
|
|
*/
|
2017-04-24 10:09:04 +07:00
|
|
|
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|
|
|
{
|
|
|
|
union drm_amdgpu_vm *args = data;
|
2017-04-20 15:18:48 +07:00
|
|
|
struct amdgpu_device *adev = dev->dev_private;
|
|
|
|
struct amdgpu_fpriv *fpriv = filp->driver_priv;
|
|
|
|
int r;
|
2017-04-24 10:09:04 +07:00
|
|
|
|
|
|
|
switch (args->in.op) {
|
|
|
|
case AMDGPU_VM_OP_RESERVE_VMID:
|
2017-04-20 15:18:48 +07:00
|
|
|
/* currently, we only have the requirement to reserve a vmid from the gfxhub */
|
2017-12-18 22:53:03 +07:00
|
|
|
r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
|
2017-04-20 15:18:48 +07:00
|
|
|
if (r)
|
|
|
|
return r;
|
|
|
|
break;
|
2017-04-24 10:09:04 +07:00
|
|
|
case AMDGPU_VM_OP_UNRESERVE_VMID:
|
2017-12-18 22:53:03 +07:00
|
|
|
amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
|
2017-04-24 10:09:04 +07:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
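/*
 * Illustrative userspace sketch (kept under #if 0 because it is not kernel
 * code): reserving and releasing a VMID through the ioctl handled above.
 * The wrapper name is an assumption; the fd is a render node opened by the
 * application or libdrm.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_reserve_vmid(int fd, int reserve)
{
	union drm_amdgpu_vm args = {
		.in.op = reserve ? AMDGPU_VM_OP_RESERVE_VMID
				 : AMDGPU_VM_OP_UNRESERVE_VMID,
	};

	return ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args);
}
#endif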
|
2018-06-29 09:51:32 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_get_task_info - Extracts task info for a PASID.
|
|
|
|
*
|
2018-09-06 09:10:57 +07:00
|
|
|
* @adev: amdgpu_device pointer
|
2018-06-29 09:51:32 +07:00
|
|
|
* @pasid: PASID identifier for VM
|
|
|
|
* @task_info: task_info to fill.
|
|
|
|
*/
|
|
|
|
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
|
|
|
|
struct amdgpu_task_info *task_info)
|
|
|
|
{
|
|
|
|
struct amdgpu_vm *vm;
|
|
|
|
|
|
|
|
spin_lock(&adev->vm_manager.pasid_lock);
|
|
|
|
|
|
|
|
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
|
|
|
|
if (vm)
|
|
|
|
*task_info = vm->task_info;
|
|
|
|
|
|
|
|
spin_unlock(&adev->vm_manager.pasid_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_set_task_info - Sets VMs task info.
|
|
|
|
*
|
|
|
|
* @vm: vm for which to set the info
|
|
|
|
*/
|
|
|
|
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
|
|
|
|
{
|
|
|
|
if (!vm->task_info.pid) {
|
|
|
|
vm->task_info.pid = current->pid;
|
|
|
|
get_task_comm(vm->task_info.task_name, current);
|
|
|
|
|
|
|
|
if (current->group_leader->mm == current->mm) {
|
|
|
|
vm->task_info.tgid = current->group_leader->pid;
|
|
|
|
get_task_comm(vm->task_info.process_name, current->group_leader);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-09-06 10:51:23 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_add_fault - Add a page fault record to fault hash table
|
|
|
|
*
|
|
|
|
* @fault_hash: fault hash table
|
|
|
|
* @key: 64-bit encoding of PASID and address
|
|
|
|
*
|
|
|
|
* This should be called when a retry page fault interrupt is
|
|
|
|
* received. If this is a new page fault, it will be added to a hash
|
|
|
|
* table. The return value indicates whether this is a new fault, or
|
|
|
|
* a fault that was already known and is already being handled.
|
|
|
|
*
|
|
|
|
* If there are too many pending page faults, this will fail. Retry
|
|
|
|
* interrupts should be ignored in this case until there is enough
|
|
|
|
* free space.
|
|
|
|
*
|
|
|
|
* Returns:
* 0 if the fault was added, 1 if the fault was already known,
|
|
|
|
* -ENOSPC if there are too many pending faults.
|
|
|
|
*/
|
|
|
|
int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int r = -ENOSPC;
|
|
|
|
|
|
|
|
if (WARN_ON_ONCE(!fault_hash))
|
|
|
|
/* Should be allocated in amdgpu_vm_init */
|
|
|
|
return r;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fault_hash->lock, flags);
|
|
|
|
|
|
|
|
/* Only let the hash table fill up to 50% for best performance */
|
|
|
|
if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
|
|
|
|
goto unlock_out;
|
|
|
|
|
|
|
|
r = chash_table_copy_in(&fault_hash->hash, key, NULL);
|
|
|
|
if (!r)
|
|
|
|
fault_hash->count++;
|
|
|
|
|
|
|
|
/* chash_table_copy_in should never fail unless we're losing count */
|
|
|
|
WARN_ON_ONCE(r < 0);
|
|
|
|
|
|
|
|
unlock_out:
|
|
|
|
spin_unlock_irqrestore(&fault_hash->lock, flags);
|
|
|
|
return r;
|
|
|
|
}
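/*
 * Illustrative sketch of how a retry fault would flow through the hash
 * table (the helper name is an assumption and the key encoding of PASID and
 * address is done by the interrupt handling code, not shown here): a fault
 * is added before the page tables are fixed up and cleared afterwards so
 * that later retries are treated as new faults again.
 */
static void example_handle_retry_fault(struct amdgpu_vm *vm, u64 key)
{
	int r;

	r = amdgpu_vm_add_fault(vm->fault_hash, key);
	if (r == 1 || r == -ENOSPC)
		return; /* already being handled or too many pending faults */

	/* ... update the page tables for the faulting address ... */

	amdgpu_vm_clear_fault(vm->fault_hash, key);
}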
|
|
|
|
|
|
|
|
/**
|
|
|
|
* amdgpu_vm_clear_fault - Remove a page fault record
|
|
|
|
*
|
|
|
|
* @fault_hash: fault hash table
|
|
|
|
* @key: 64-bit encoding of PASID and address
|
|
|
|
*
|
|
|
|
* This should be called when a page fault has been handled. Any
|
|
|
|
* future interrupt with this key will be processed as a new
|
|
|
|
* page fault.
|
|
|
|
*/
|
|
|
|
void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
if (!fault_hash)
|
|
|
|
return;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fault_hash->lock, flags);
|
|
|
|
|
|
|
|
r = chash_table_remove(&fault_hash->hash, key, NULL);
|
|
|
|
if (!WARN_ON_ONCE(r < 0)) {
|
|
|
|
fault_hash->count--;
|
|
|
|
WARN_ON_ONCE(fault_hash->count < 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fault_hash->lock, flags);
|
|
|
|
}
|