mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-23 18:48:01 +07:00
a1d2a63399
- Mali 4xx GPUs have two kinds of processors: GP and PP. GP is for OpenGL vertex shader processing and PP is for fragment shader processing. Each processor has its own MMU, so processors work in virtual address space. - There's only one GP but multiple PPs (max 4 for Mali 400 and 8 for Mali 450) in the same Mali 4xx GPU. All PPs are grouped together to handle a single fragment shader task divided by FB output tiled pixels. The Mali 400 user space driver is responsible for assigning target tiled pixels to each PP, but Mali 450 has a HW module called DLBU to dynamically balance each PP's load. - The user space driver allocates a buffer object and maps it into the GPU virtual address space, uploads the command stream and draw data with a CPU mmap of the buffer object, then submits the task to GP/PP with a register frame indicating where the command stream is and misc settings. - There's no command stream validation/relocation because each user process has its own GPU virtual address space. GP/PP's MMU switches virtual address space before running two tasks from different user processes. Erroneous or malicious user space code just gets an MMU fault or GP/PP error IRQ, and then the HW/SW will be recovered. - Use GEM+shmem for MM. Currently memory is just allocated and pinned at gem object creation. The GPU VM map of the buffer is also done in the alloc stage in kernel space. We may delay the memory allocation and real GPU VM map to the command submission stage in the future as an improvement. - Use drm_sched for GPU task scheduling. Each OpenGL context should have a lima context object in the kernel to distinguish tasks from different users. drm_sched gets tasks from each lima context in a fair way. 
mesa driver can be found here before upstreamed: https://gitlab.freedesktop.org/lima/mesa v8: - add comments for in_sync - fix ctx free miss mutex unlock v7: - remove lima_fence_ops with default value - move fence slab create to device probe - check pad ioctl args to be zero - add comments for user/kernel interface v6: - fix comments by checkpatch.pl v5: - export gp/pp version to userspace - rebase on drm-misc-next v4: - use get param interface to get info - separate context create/free ioctl - remove unused max sched task param - update copyright time - use xarray instead of idr - stop using drmP.h v3: - fix comments from kbuild robot - restrict supported arch to tested ones v2: - fix syscall argument check - fix job finish fence leak since kernel 5.0 - use drm syncobj to replace native fence - move buffer object GPU va map into kernel - reserve syscall argument space for future info - remove kernel gem modifier - switch TTM back to GEM+shmem MM - use time based io poll - use whole register name - adopt gem reservation obj integration - use drm_timeout_abs_to_jiffies Cc: Eric Anholt <eric@anholt.net> Cc: Rob Herring <robh@kernel.org> Cc: Christian König <ckoenig.leichtzumerken@gmail.com> Cc: Daniel Vetter <daniel@ffwll.ch> Cc: Alex Deucher <alexdeucher@gmail.com> Cc: Sam Ravnborg <sam@ravnborg.org> Cc: Rob Clark <robdclark@gmail.com> Cc: Dave Airlie <airlied@gmail.com> Signed-off-by: Andreas Baierl <ichgeh@imkreisrum.de> Signed-off-by: Erico Nunes <nunes.erico@gmail.com> Signed-off-by: Heiko Stuebner <heiko@sntech.de> Signed-off-by: Marek Vasut <marex@denx.de> Signed-off-by: Neil Armstrong <narmstrong@baylibre.com> Signed-off-by: Simon Shields <simon@lineageos.org> Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com> Signed-off-by: Qiang Yu <yuq825@gmail.com> Reviewed-by: Eric Anholt <eric@anholt.net> Reviewed-by: Rob Herring <robh@kernel.org> Signed-off-by: Eric Anholt <eric@anholt.net> Link: https://patchwork.freedesktop.org/patch/291200/
123 lines
2.5 KiB
C
123 lines
2.5 KiB
C
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
|
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
|
|
|
|
#include <drm/drm_prime.h>
|
|
#include <linux/pagemap.h>
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include "lima_object.h"
|
|
|
|
void lima_bo_destroy(struct lima_bo *bo)
|
|
{
|
|
if (bo->sgt) {
|
|
kfree(bo->pages);
|
|
drm_prime_gem_destroy(&bo->gem, bo->sgt);
|
|
} else {
|
|
if (bo->pages_dma_addr) {
|
|
int i, npages = bo->gem.size >> PAGE_SHIFT;
|
|
|
|
for (i = 0; i < npages; i++) {
|
|
if (bo->pages_dma_addr[i])
|
|
dma_unmap_page(bo->gem.dev->dev,
|
|
bo->pages_dma_addr[i],
|
|
PAGE_SIZE, DMA_BIDIRECTIONAL);
|
|
}
|
|
}
|
|
|
|
if (bo->pages)
|
|
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
|
|
}
|
|
|
|
kfree(bo->pages_dma_addr);
|
|
drm_gem_object_release(&bo->gem);
|
|
kfree(bo);
|
|
}
|
|
|
|
/*
 * Allocate and minimally initialize a lima_bo, including its embedded
 * GEM object.  @size is rounded up to a whole number of pages; @flags
 * is currently unused.  Returns the new bo or an ERR_PTR on failure.
 */
static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags,
					     struct reservation_object *resv)
{
	struct lima_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	int ret;

	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	INIT_LIST_HEAD(&bo->va);
	bo->gem.resv = resv;

	ret = drm_gem_object_init(dev->ddev, &bo->gem, PAGE_ALIGN(size));
	if (ret) {
		kfree(bo);
		bo = ERR_PTR(ret);
	}

	return bo;
}
|
|
|
|
/*
 * Create a lima buffer object of @size bytes (rounded up to page size
 * by lima_bo_create_struct()).
 *
 * Two paths:
 *  - @sgt != NULL: the object is imported via PRIME; the page pointers
 *    and DMA addresses are extracted from the given sg table, and the
 *    bo takes ownership of @sgt.
 *  - @sgt == NULL: pages are allocated from shmem and DMA-mapped one
 *    by one.
 *
 * Returns the new bo or an ERR_PTR on failure.  On any failure after
 * the struct is allocated, lima_bo_destroy() undoes the partial setup;
 * it relies on still-zero entries in pages_dma_addr to know which
 * pages were never mapped.
 */
struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
			       u32 flags, struct sg_table *sgt,
			       struct reservation_object *resv)
{
	int i, err;
	size_t npages;
	struct lima_bo *bo, *ret;

	bo = lima_bo_create_struct(dev, size, flags, resv);
	if (IS_ERR(bo))
		return bo;

	npages = bo->gem.size >> PAGE_SHIFT;

	/* one DMA address per page; kcalloc zero-fills so a partial
	 * failure is distinguishable in lima_bo_destroy() */
	bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
	if (!bo->pages_dma_addr) {
		ret = ERR_PTR(-ENOMEM);
		goto err_out;
	}

	if (sgt) {
		/* imported buffer: keep the sg table for the lifetime
		 * of the bo (freed via drm_prime_gem_destroy) */
		bo->sgt = sgt;

		bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
		if (!bo->pages) {
			ret = ERR_PTR(-ENOMEM);
			goto err_out;
		}

		err = drm_prime_sg_to_page_addr_arrays(
			sgt, bo->pages, bo->pages_dma_addr, npages);
		if (err) {
			ret = ERR_PTR(err);
			goto err_out;
		}
	} else {
		/* NOTE(review): GFP_DMA32 presumably because the GPU
		 * can only address 32-bit DMA — confirm against the
		 * Mali 4xx HW constraints. */
		mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
		bo->pages = drm_gem_get_pages(&bo->gem);
		if (IS_ERR(bo->pages)) {
			ret = ERR_CAST(bo->pages);
			/* clear so destroy doesn't see an ERR_PTR */
			bo->pages = NULL;
			goto err_out;
		}

		/* map each page; on failure the current entry stays 0
		 * and destroy unmaps only the earlier ones */
		for (i = 0; i < npages; i++) {
			dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
						       PAGE_SIZE, DMA_BIDIRECTIONAL);
			if (dma_mapping_error(dev->dev, addr)) {
				ret = ERR_PTR(-EFAULT);
				goto err_out;
			}
			bo->pages_dma_addr[i] = addr;
		}

	}

	return bo;

err_out:
	lima_bo_destroy(bo);
	return ret;
}
|