2016-09-28 20:41:50 +07:00
|
|
|
/*
|
|
|
|
* Copyright 2016 Advanced Micro Devices, Inc.
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
|
|
|
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
|
|
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
|
|
* OTHER DEALINGS IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
* Authors: Christian König
|
|
|
|
*/
|
|
|
|
#ifndef __AMDGPU_VM_H__
|
|
|
|
#define __AMDGPU_VM_H__
|
|
|
|
|
2017-08-26 07:40:26 +07:00
|
|
|
#include <linux/idr.h>
|
2017-12-06 23:49:39 +07:00
|
|
|
#include <linux/kfifo.h>
|
|
|
|
#include <linux/rbtree.h>
|
|
|
|
#include <drm/gpu_scheduler.h>
|
2018-02-07 08:32:32 +07:00
|
|
|
#include <drm/drm_file.h>
|
drm/amdgpu: use bulk moves for efficient VM LRU handling (v6)
I continue to work for bulk moving that based on the proposal by Christian.
Background:
amdgpu driver will move all PD/PT and PerVM BOs into idle list. Then move all of
them on the end of LRU list one by one. Thus, that cause so many BOs moved to
the end of the LRU, and impact performance seriously.
Then Christian provided a workaround to not move PD/PT BOs on LRU with below
patch:
Commit 0bbf32026cf5ba41e9922b30e26e1bed1ecd38ae ("drm/amdgpu: band aid
validating VM PTs")
However, the final solution should bulk move all PD/PT and PerVM BOs on the LRU
instead of one by one.
Whenever amdgpu_vm_validate_pt_bos() is called and we have BOs which need to be
validated we move all BOs together to the end of the LRU without dropping the
lock for the LRU.
While doing so we note the beginning and end of this block in the LRU list.
Now when amdgpu_vm_validate_pt_bos() is called and we don't have anything to do,
we don't move every BO one by one, but instead cut the LRU list into pieces so
that we bulk move everything to the end in just one operation.
Test data:
+--------------+-----------------+-----------+---------------------------------------+
| |The Talos |Clpeak(OCL)|BusSpeedReadback(OCL) |
| |Principle(Vulkan)| | |
+------------------------------------------------------------------------------------+
| | | |0.319 ms(1k) 0.314 ms(2K) 0.308 ms(4K) |
| Original | 147.7 FPS | 76.86 us |0.307 ms(8K) 0.310 ms(16K) |
+------------------------------------------------------------------------------------+
| Orignial + WA| | |0.254 ms(1K) 0.241 ms(2K) |
|(don't move | 162.1 FPS | 42.15 us |0.230 ms(4K) 0.223 ms(8K) 0.204 ms(16K)|
|PT BOs on LRU)| | | |
+------------------------------------------------------------------------------------+
| Bulk move | 163.1 FPS | 40.52 us |0.244 ms(1K) 0.252 ms(2K) 0.213 ms(4K) |
| | | |0.214 ms(8K) 0.225 ms(16K) |
+--------------+-----------------+-----------+---------------------------------------+
After test them with above three benchmarks include vulkan and opencl. We can
see the visible improvement than original, and even better than original with
workaround.
v2: move all BOs include idle, relocated, and moved list to the end of LRU and
put them together.
v3: remove unused parameter and use list_for_each_entry instead of the one with
save entry.
v4: move the amdgpu_vm_move_to_lru_tail after command submission, at that time,
all bo will be back on idle list.
v5: remove amdgpu_vm_move_to_lru_tail_by_list(), use bulk_moveable instread of
validated, and move ttm_bo_bulk_move_lru_tail() also into
amdgpu_vm_move_to_lru_tail().
v6: clean up and fix return value.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Huang Rui <ray.huang@amd.com>
Tested-by: Mike Lothian <mike@fireburn.co.uk>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
2018-08-06 09:57:08 +07:00
|
|
|
#include <drm/ttm/ttm_bo_driver.h>
|
2016-09-28 20:41:50 +07:00
|
|
|
|
|
|
|
#include "amdgpu_sync.h"
|
|
|
|
#include "amdgpu_ring.h"
|
2017-12-18 22:53:03 +07:00
|
|
|
#include "amdgpu_ids.h"
|
2016-09-28 20:41:50 +07:00
|
|
|
|
|
|
|
/* Forward declarations so users of this header don't need the full types */
struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)

/* PTE flags common across ASIC generations */
#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

/* fragment size is encoded in bits [7:11] of the PTE */
#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

/* default flags for an ATS (address translation services) mapping */
#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* Reserve 4MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(4ULL << 20)

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS	3
#define AMDGPU_GFXHUB_0		0
#define AMDGPU_MMHUB_0		1
#define AMDGPU_MMHUB_1		2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE		(1ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX 0
#define AMDGPU_VM_CONTEXT_COMPUTE 1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
|
|
|
|
|
2017-12-13 13:22:54 +07:00
|
|
|
/* VMPT level enumerate, and the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};
|
|
|
|
|
2017-08-01 15:51:43 +07:00
|
|
|
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;

	/* protected by bo being reserved; singly linked list of all
	 * vm_bo_base structures attached to the same BO
	 */
	struct amdgpu_vm_bo_base *next;

	/* protected by spinlock; links this entry into one of the VM
	 * state lists (evicted/relocated/moved/idle/invalidated)
	 */
	struct list_head vm_status;

	/* protected by the BO being reserved */
	bool moved;
};
|
2017-06-09 22:26:57 +07:00
|
|
|
|
2016-09-28 20:41:50 +07:00
|
|
|
/* one node of the page-table tree (page directory or page table) */
struct amdgpu_vm_pt {
	/* tracks the backing BO and its state within the owning VM */
	struct amdgpu_vm_bo_base base;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt *entries;
};
|
|
|
|
|
2018-08-03 18:06:02 +07:00
|
|
|
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};
|
|
|
|
|
2018-06-29 09:51:32 +07:00
|
|
|
/* Some basic info about the task associated with a VM; filled in by
 * amdgpu_vm_set_task_info() and queried via amdgpu_vm_get_task_info()
 * (e.g. for fault reporting).
 */
struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};
|
|
|
|
|
2019-03-18 18:27:35 +07:00
|
|
|
/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @direct: if changes should be made directly
	 */
	bool direct;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;
};
|
|
|
|
|
2019-03-18 19:16:03 +07:00
|
|
|
/* backend operations for updating VM page tables; implemented by the
 * CPU path (amdgpu_vm_cpu_funcs) and the SDMA path (amdgpu_vm_sdma_funcs)
 */
struct amdgpu_vm_update_funcs {
	/* make the page-table BO accessible for the backend */
	int (*map_table)(struct amdgpu_bo *bo);
	/* set up an update, syncing to @exclusive and to @owner's fences */
	int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
		       struct dma_fence *exclusive);
	/* write @count entries starting at @pe, stepping addresses by @incr */
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	/* finish the update, optionally returning the resulting fence */
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
|
|
|
|
|
2016-09-28 20:41:50 +07:00
|
|
|
/* per-process GPU virtual address space */
struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached va;

	/* BOs who needs a validation */
	struct list_head	evicted;

	/* PT BOs which relocated and their parent need an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;
	spinlock_t		invalidated_lock;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_pt	root;
	struct dma_fence	*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity	direct;
	struct drm_sched_entity	delayed;

	unsigned int		pasid;
	/* dedicated to vm */
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* mark whether can do the bulk move */
	bool			bulk_moveable;
};
|
|
|
|
|
|
|
|
/* global (per-device) VM management state */
struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_sched_rq			*vm_pte_rqs[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rqs;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up VM of a page fault
	 */
	struct idr				pasid_idr;
	spinlock_t				pasid_lock;

	/* counter of mapped memory through xgmi */
	uint32_t				xgmi_map_counter;
	struct mutex				lock_pstate;
};
|
|
|
|
|
2018-08-03 18:06:02 +07:00
|
|
|
/* convenience wrappers dispatching to the device's vm_pte_funcs */
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))

/* the two available page-table update backends (CPU and SDMA) */
extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

/* per-device manager setup/teardown */
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);

/* VM lifecycle */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);

/* validation and command-submission support */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);

/* page-table updates */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool direct);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);

/* BO <-> VA mapping management */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);

void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

/* task info and fault handling */
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			     struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
			    uint64_t addr);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

/* LRU bulk-move handling for per-VM BOs */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
|
|
|
|
|
2016-09-28 20:41:50 +07:00
|
|
|
#endif
|