commit a1d2a63399 drm/lima: driver for ARM Mali4xx GPUs
- Mali 4xx GPUs have two kinds of processors: GP and PP. GP is for
  OpenGL vertex shader processing and PP is for fragment shader
  processing. Each processor has its own MMU, so processors work in
  virtual address space.
- There's only one GP but multiple PPs (max 4 for Mali 400 and 8 for
  Mali 450) in the same Mali 4xx GPU. All PPs are grouped together to
  handle a single fragment shader task divided by FB output tiled
  pixels. The Mali 400 user space driver is responsible for assigning
  target tiled pixels to each PP, but Mali 450 has a HW module called
  DLBU to dynamically balance each PP's load.
- The user space driver allocates buffer objects and maps them into
  the GPU virtual address space, uploads the command stream and draw
  data through a CPU mmap of the buffer object, then submits tasks to
  GP/PP with a register frame indicating where the command stream is
  and misc settings.
- There's no command stream validation/relocation because each user
  process has its own GPU virtual address space. The GP/PP MMU
  switches virtual address space before running two tasks from
  different user processes. Erroneous or malicious user space code
  just gets an MMU fault or GP/PP error IRQ, then the HW/SW will be
  recovered.
- Use GEM+shmem for MM. Currently memory is just allocated and pinned
  at GEM object creation. The GPU VM map of the buffer is also done at
  the alloc stage in kernel space. We may delay the memory allocation
  and real GPU VM map to the command submission stage in the future as
  an improvement.
- Use drm_sched for GPU task scheduling. Each OpenGL context should
  have a lima context object in the kernel to distinguish tasks from
  different users. drm_sched gets tasks from each lima context in a
  fair way.

The mesa driver can be found here before being upstreamed:
https://gitlab.freedesktop.org/lima/mesa

v8:
- add comments for in_sync
- fix ctx free miss mutex unlock

v7:
- remove lima_fence_ops with default value
- move fence slab create to device probe
- check pad ioctl args to be zero
- add comments for user/kernel interface

v6:
- fix comments by checkpatch.pl

v5:
- export gp/pp version to userspace
- rebase on drm-misc-next

v4:
- use get param interface to get info
- separate context create/free ioctl
- remove unused max sched task param
- update copyright time
- use xarray instead of idr
- stop using drmP.h

v3:
- fix comments from kbuild robot
- restrict supported arch to tested ones

v2:
- fix syscall argument check
- fix job finish fence leak since kernel 5.0
- use drm syncobj to replace native fence
- move buffer object GPU va map into kernel
- reserve syscall argument space for future info
- remove kernel gem modifier
- switch TTM back to GEM+shmem MM
- use time based io poll
- use whole register name
- adopt gem reservation obj integration
- use drm_timeout_abs_to_jiffies

Cc: Eric Anholt <eric@anholt.net>
Cc: Rob Herring <robh@kernel.org>
Cc: Christian König <ckoenig.leichtzumerken@gmail.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Alex Deucher <alexdeucher@gmail.com>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Dave Airlie <airlied@gmail.com>
Signed-off-by: Andreas Baierl <ichgeh@imkreisrum.de>
Signed-off-by: Erico Nunes <nunes.erico@gmail.com>
Signed-off-by: Heiko Stuebner <heiko@sntech.de>
Signed-off-by: Marek Vasut <marex@denx.de>
Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
Signed-off-by: Simon Shields <simon@lineageos.org>
Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com>
Signed-off-by: Qiang Yu <yuq825@gmail.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Rob Herring <robh@kernel.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/291200/
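
As an illustration of the get-param interface mentioned in the v4
notes above, a minimal user space query might look like this. The
sketch is not part of the patch: it assumes the uapi declared in
include/uapi/drm/lima_drm.h by this series (struct drm_lima_get_param,
DRM_LIMA_PARAM_GPU_ID, DRM_IOCTL_LIMA_GET_PARAM) plus libdrm's
drmIoctl(); the render node path is system-dependent.

#include <stdio.h>
#include <fcntl.h>
#include <xf86drm.h>        /* drmIoctl() from libdrm */
#include <drm/lima_drm.h>   /* lima uapi added by this series */

int main(void)
{
	/* render node index is system-dependent */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	struct drm_lima_get_param get = {
		.param = DRM_LIMA_PARAM_GPU_ID,
		/* pad must stay zero (see the v7 note above);
		 * the initializer zeroes it */
	};

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_LIMA_GET_PARAM, &get))
		return 1;

	/* value reports one of the drm_lima_param_gpu_id enum values */
	printf("lima gpu id: %llu\n", (unsigned long long)get.value);
	return 0;
}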
283 lines · 5.5 KiB · C
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "lima_device.h"
#include "lima_vm.h"
#include "lima_object.h"
#include "lima_regs.h"

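/*
 * Descriptive note (added for this rewrite): one lima_bo_va is kept on
 * a BO's va list for each VM the BO is mapped into. ref_count counts
 * lima_vm_bo_add() calls so the mapping is only torn down when the
 * last user calls lima_vm_bo_del(); node is the drm_mm allocation
 * holding the BO's GPU virtual address range in that VM.
 */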
struct lima_bo_va {
	struct list_head list;
	unsigned int ref_count;

	struct drm_mm_node node;

	struct lima_vm *vm;
};

#define LIMA_VM_PD_SHIFT 22
#define LIMA_VM_PT_SHIFT 12
#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT

#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)

#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)

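/*
 * Worked example (illustration, not in the original patch): the Mali
 * MMU walks a two-level table with 4K pages, so each page directory
 * entry covers 4M. For va = 0x12345678:
 *   LIMA_PDE(va) = va >> 22              = 0x48  (4M directory slot)
 *   LIMA_PTE(va) = (va & 0x3fffff) >> 12 = 0x345 (4K page within it)
 * The driver allocates page tables in blocks of LIMA_VM_NUM_PT_PER_BT
 * tables, so LIMA_PBE()/LIMA_BTE() split the same address per block
 * instead: PBE picks the block, BTE the entry inside that block.
 */
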
static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
{
	u32 addr;

	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
		u32 pbe = LIMA_PBE(addr);
		u32 bte = LIMA_BTE(addr);

		vm->bts[pbe].cpu[bte] = 0;
	}
}

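/*
 * Map the pages backing [start, end] (DMA addresses in dma[]) into
 * this VM. Page table blocks are allocated lazily: on first use of a
 * block, all LIMA_VM_NUM_PT_PER_BT page directory entries covering it
 * are pointed at the new allocation. On allocation failure the entries
 * mapped so far are unwound before returning -ENOMEM.
 */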
static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
				  u32 start, u32 end)
{
	u64 addr;
	int i = 0;

	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
		u32 pbe = LIMA_PBE(addr);
		u32 bte = LIMA_BTE(addr);

		if (!vm->bts[pbe].cpu) {
			dma_addr_t pts;
			u32 *pd;
			int j;

			vm->bts[pbe].cpu = dma_alloc_wc(
				vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
				&vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
			if (!vm->bts[pbe].cpu) {
				if (addr != start)
					lima_vm_unmap_page_table(vm, start, addr - 1);
				return -ENOMEM;
			}

			pts = vm->bts[pbe].dma;
			pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
			for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
				pd[j] = pts | LIMA_VM_FLAG_PRESENT;
				pts += LIMA_PAGE_SIZE;
			}
		}

		vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
	}

	return 0;
}

static struct lima_bo_va *
lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va, *ret = NULL;

	list_for_each_entry(bo_va, &bo->va, list) {
		if (bo_va->vm == vm) {
			ret = bo_va;
			break;
		}
	}

	return ret;
}

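/*
 * Take a mapping reference on bo in vm. If the BO is already mapped,
 * only ref_count is bumped; otherwise a GPU VA range is reserved from
 * the VM's drm_mm and the BO's pages are mapped into the page table.
 * With create == false an unmapped BO fails with -ENOENT instead.
 * Lock order is bo->lock then vm->lock, matching lima_vm_bo_del().
 */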
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
	struct lima_bo_va *bo_va;
	int err;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (bo_va) {
		bo_va->ref_count++;
		mutex_unlock(&bo->lock);
		return 0;
	}

	/* should not create new bo_va if not asked by caller */
	if (!create) {
		mutex_unlock(&bo->lock);
		return -ENOENT;
	}

	bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
	if (!bo_va) {
		err = -ENOMEM;
		goto err_out0;
	}

	bo_va->vm = vm;
	bo_va->ref_count = 1;

	mutex_lock(&vm->lock);

	err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
	if (err)
		goto err_out1;

	err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
				     bo_va->node.start + bo_va->node.size - 1);
	if (err)
		goto err_out2;

	mutex_unlock(&vm->lock);

	list_add_tail(&bo_va->list, &bo->va);

	mutex_unlock(&bo->lock);
	return 0;

err_out2:
	drm_mm_remove_node(&bo_va->node);
err_out1:
	mutex_unlock(&vm->lock);
	kfree(bo_va);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}

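/*
 * Drop a mapping reference taken by lima_vm_bo_add(). The caller must
 * guarantee the BO is currently mapped in vm; the last reference
 * unmaps the pages, returns the VA range to the drm_mm and frees the
 * lima_bo_va.
 */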
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (--bo_va->ref_count > 0) {
		mutex_unlock(&bo->lock);
		return;
	}

	mutex_lock(&vm->lock);

	lima_vm_unmap_page_table(vm, bo_va->node.start,
				 bo_va->node.start + bo_va->node.size - 1);

	drm_mm_remove_node(&bo_va->node);

	mutex_unlock(&vm->lock);

	list_del(&bo_va->list);

	mutex_unlock(&bo->lock);

	kfree(bo_va);
}

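/*
 * Return the GPU virtual address where bo is mapped in vm. The bo_va
 * is dereferenced without a NULL check, so this is only valid while
 * the caller holds a lima_vm_bo_add() reference on the mapping.
 */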
u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 ret;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	ret = bo_va->node.start;

	mutex_unlock(&bo->lock);

	return ret;
}

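/*
 * Create a per-process GPU VM: allocate the page directory and, when
 * the device has a DLBU (Mali 450), map the shared DLBU page at its
 * reserved virtual address so every PP sees the dynamic load balance
 * unit. The drm_mm covers the device's usable VA range.
 */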
struct lima_vm *lima_vm_create(struct lima_device *dev)
{
	struct lima_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return NULL;

	vm->dev = dev;
	mutex_init(&vm->lock);
	kref_init(&vm->refcount);

	vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
				  GFP_KERNEL | __GFP_ZERO);
	if (!vm->pd.cpu)
		goto err_out0;

	if (dev->dlbu_cpu) {
		int err = lima_vm_map_page_table(
			vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
			LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
		if (err)
			goto err_out1;
	}

	drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);

	return vm;

err_out1:
	dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
err_out0:
	kfree(vm);
	return NULL;
}

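/*
 * kref release callback: tears down the VA allocator and frees the
 * lazily allocated page table blocks and the page directory. By this
 * point all BO mappings in this VM are expected to have been removed
 * via lima_vm_bo_del().
 */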
void lima_vm_release(struct kref *kref)
{
	struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
	int i;

	drm_mm_takedown(&vm->mm);

	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (vm->bts[i].cpu)
			dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
				    vm->bts[i].cpu, vm->bts[i].dma);
	}

	if (vm->pd.cpu)
		dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);

	kfree(vm);
}

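/*
 * Debug helper: dump every page directory entry and each non-zero
 * page table entry of this VM to the kernel log.
 */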
void lima_vm_print(struct lima_vm *vm)
{
	int i, j, k;
	u32 *pd, *pt;

	if (!vm->pd.cpu)
		return;

	pd = vm->pd.cpu;
	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (!vm->bts[i].cpu)
			continue;

		pt = vm->bts[i].cpu;
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;

			printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);

			for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
				u32 pte = *pt++;

				if (pte)
					printk(KERN_INFO "  pt %03x:%08x\n", k, pte);
			}
		}
	}
}