Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)
7198e6b031
Add initial support for a3xx 3d core.

So far, with hardware that I've seen to date, we can have:
+ zero, one, or two z180 2d cores
+ a3xx or a2xx 3d core, which share a common CP (the firmware for the CP
  seems to implement some different PM4 packet types but the basics of
  cmdstream submission are the same)

Which means that the eventual complete "class" hierarchy, once support for
all past and present hw is in place, becomes:
+ msm_gpu
  + adreno_gpu
    + a3xx_gpu
    + a2xx_gpu
  + z180_gpu

This commit splits out the parts that will eventually be common between
a2xx/a3xx into adreno_gpu, and the parts that are even common to z180 into
msm_gpu.

Note that there is no cmdstream validation required. All memory access from
the GPU is via IOMMU/MMU. So as long as you don't map silly things to the
GPU, there isn't much damage that the GPU can do.

Signed-off-by: Rob Clark <robdclark@gmail.com>
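The "class" hierarchy above is realized with plain struct embedding and container_of() downcasts, which is how this file gets from the generic struct msm_gpu back to the adreno-specific state via to_adreno_gpu(). A minimal sketch of the pattern follows; the real definitions live in msm_gpu.h / adreno_gpu.h, which are not part of this file, so the member lists below are illustrative only (names taken from how they are used here):

    /* generic "base class", common even to z180: */
    struct msm_gpu {
            const char *name;
            struct msm_ringbuffer *rb;
            /* ... funcs, iommu, irq, clocks ... */
    };

    /* adreno (a2xx/a3xx) specialization embeds the base: */
    struct adreno_gpu {
            struct msm_gpu base;
            struct adreno_rev rev;
            const struct adreno_info *info;
            uint32_t revn;
            /* ... pm4/pfp firmware, memptrs ... */
    };

    /* downcast from the embedded base back to the containing struct: */
    #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

An a3xx_gpu (and later a2xx_gpu) would in turn embed struct adreno_gpu the same way.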
351 lines, 8.7 KiB, C
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "adreno_gpu.h"
#include "msm_gem.h"

struct adreno_info {
        struct adreno_rev rev;
        uint32_t revn;
        const char *name;
        const char *pm4fw, *pfpfw;
        uint32_t gmem;
};

#define ANY_ID 0xff

static const struct adreno_info gpulist[] = {
        {
                .rev = ADRENO_REV(3, 0, 5, ANY_ID),
                .revn = 305,
                .name = "A305",
                .pm4fw = "a300_pm4.fw",
                .pfpfw = "a300_pfp.fw",
                .gmem = SZ_256K,
        }, {
                .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
                .revn = 320,
                .name = "A320",
                .pm4fw = "a300_pm4.fw",
                .pfpfw = "a300_pfp.fw",
                .gmem = SZ_512K,
        }, {
                .rev = ADRENO_REV(3, 3, 0, 0),
                .revn = 330,
                .name = "A330",
                .pm4fw = "a330_pm4.fw",
                .pfpfw = "a330_pfp.fw",
                .gmem = SZ_1M,
        },
};

#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16

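/* Handle MSM_PARAM_* queries (GPU id, GMEM size): */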
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

        switch (param) {
        case MSM_PARAM_GPU_ID:
                *value = adreno_gpu->info->revn;
                return 0;
        case MSM_PARAM_GMEM_SIZE:
                *value = adreno_gpu->info->gmem;
                return 0;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
                return -EINVAL;
        }
}

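/* GPU (iova) address of a field within the shared adreno_rbmemptrs buffer: */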
#define rbmemptr(adreno_gpu, member) \
        ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))

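/* Common CP setup: program ringbuffer size/base, rptr writeback and scratch: */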
int adreno_hw_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

        DBG("%s", gpu->name);

        /* Setup REG_CP_RB_CNTL: */
        gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
                        /* size is log2(quad-words): */
                        AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
                        AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));

        /* Setup ringbuffer address: */
        gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
        gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));

        /* Setup scratch/timestamp: */
        gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));

        gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);

        return 0;
}

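/* Current write position, as a dword offset from the start of the ring: */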
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
        return ring->cur - ring->start;
}

uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        return adreno_gpu->memptrs->fence;
}

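/*
 * Write a submit into the ringbuffer: an indirect-buffer packet per cmd
 * buffer, then the fence write-back (CACHE_FLUSH_TS) and a CP interrupt.
 */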
int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = gpu->rb;
        unsigned i, ibs = 0;

        adreno_gpu->last_fence = submit->fence;

        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        /* ignore IB-targets */
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        /* ignore if there has not been a ctx switch: */
                        if (priv->lastctx == ctx)
                                break;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
                        OUT_RING(ring, submit->cmd[i].iova);
                        OUT_RING(ring, submit->cmd[i].size);
                        ibs++;
                        break;
                }
        }

        /* on a320, at least, we seem to need to pad things out to an
         * even number of qwords to avoid issue w/ CP hanging on wrap-
         * around:
         */
        if (ibs % 2)
                OUT_PKT2(ring);

        OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
        OUT_RING(ring, submit->fence);

        if (adreno_is_a3xx(adreno_gpu)) {
                /* Flush HLSQ lazy updates to make sure there is nothing
                 * pending for indirect loads after the timestamp has
                 * passed:
                 */
                OUT_PKT3(ring, CP_EVENT_WRITE, 1);
                OUT_RING(ring, HLSQ_FLUSH);

                OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
                OUT_RING(ring, 0x00000000);
        }

        OUT_PKT3(ring, CP_EVENT_WRITE, 3);
        OUT_RING(ring, CACHE_FLUSH_TS);
        OUT_RING(ring, rbmemptr(adreno_gpu, fence));
        OUT_RING(ring, submit->fence);

        /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
        OUT_PKT3(ring, CP_INTERRUPT, 1);
        OUT_RING(ring, 0x80000000);

#if 0
        if (adreno_is_a3xx(adreno_gpu)) {
                /* Dummy set-constant to trigger context rollover */
                OUT_PKT3(ring, CP_SET_CONSTANT, 2);
                OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
                OUT_RING(ring, 0x00000000);
        }
#endif

        gpu->funcs->flush(gpu);

        return 0;
}

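/* Kick the CP by updating the ringbuffer write pointer register: */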
void adreno_flush(struct msm_gpu *gpu)
{
        uint32_t wptr = get_wptr(gpu->rb);

        /* ensure writes to ringbuffer have hit system memory: */
        mb();

        gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
}

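/* Wait (with timeout) for the CP to drain everything queued in the ring: */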
void adreno_idle(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t rptr, wptr = get_wptr(gpu->rb);
        unsigned long t;

        t = jiffies + ADRENO_IDLE_TIMEOUT;

        /* then wait for CP to drain ringbuffer: */
        do {
                rptr = adreno_gpu->memptrs->rptr;
                if (rptr == wptr)
                        return;
        } while (time_before(jiffies, t));

        DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);

        /* TODO maybe we need to reset GPU here to recover from hang? */
}

#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

        seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
                        adreno_gpu->info->revn, adreno_gpu->rev.core,
                        adreno_gpu->rev.major, adreno_gpu->rev.minor,
                        adreno_gpu->rev.patchid);

        seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
                        adreno_gpu->last_fence);
        seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
        seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
        seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
}
#endif

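/* Busy-wait until at least ndwords of ringbuffer space are free: */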
void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t freedwords;
        do {
                uint32_t size = gpu->rb->size / 4;
                uint32_t wptr = get_wptr(gpu->rb);
                uint32_t rptr = adreno_gpu->memptrs->rptr;
                freedwords = (rptr + (size - 1) - wptr) % size;
        } while (freedwords < ndwords);
}

static const char *iommu_ports[] = {
        "gfx3d_user", "gfx3d_priv",
        "gfx3d1_user", "gfx3d1_priv",
};

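/* A gpulist entry field matches if it is the ANY_ID wildcard or exactly equal: */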
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
        return (entry == ANY_ID) || (entry == id);
}

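/*
 * Common probe-time setup: identify the GPU from gpulist, load PM4/PFP
 * firmware, initialize the base msm_gpu, attach the IOMMU and allocate
 * the shared memptrs buffer.
 */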
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
                struct adreno_rev rev)
{
        int i, ret;

        /* identify gpu: */
        for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
                const struct adreno_info *info = &gpulist[i];
                if (_rev_match(info->rev.core, rev.core) &&
                                _rev_match(info->rev.major, rev.major) &&
                                _rev_match(info->rev.minor, rev.minor) &&
                                _rev_match(info->rev.patchid, rev.patchid)) {
                        gpu->info = info;
                        gpu->revn = info->revn;
                        break;
                }
        }

        if (i == ARRAY_SIZE(gpulist)) {
                dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
                                rev.core, rev.major, rev.minor, rev.patchid);
                return -ENXIO;
        }

        DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
                        rev.core, rev.major, rev.minor, rev.patchid);

        gpu->funcs = funcs;
        gpu->rev = rev;

        ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
        if (ret) {
                dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
                                gpu->info->pm4fw, ret);
                return ret;
        }

        ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
        if (ret) {
                dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
                                gpu->info->pfpfw, ret);
                return ret;
        }

        ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
                        gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
                        RB_SIZE);
        if (ret)
                return ret;

        ret = msm_iommu_attach(drm, gpu->base.iommu,
                        iommu_ports, ARRAY_SIZE(iommu_ports));
        if (ret)
                return ret;

        gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
                        MSM_BO_UNCACHED);
        if (IS_ERR(gpu->memptrs_bo)) {
                ret = PTR_ERR(gpu->memptrs_bo);
                gpu->memptrs_bo = NULL;
                dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
                return ret;
        }

        gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
        if (!gpu->memptrs) {
                dev_err(drm->dev, "could not vmap memptrs\n");
                return -ENOMEM;
        }

        ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
                        &gpu->memptrs_iova);
        if (ret) {
                dev_err(drm->dev, "could not map memptrs: %d\n", ret);
                return ret;
        }

        return 0;
}

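/* Undo adreno_gpu_init(): drop memptrs, firmware and the base msm_gpu: */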
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
        if (gpu->memptrs_bo) {
                if (gpu->memptrs_iova)
                        msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
                drm_gem_object_unreference(gpu->memptrs_bo);
        }
        if (gpu->pm4)
                release_firmware(gpu->pm4);
        if (gpu->pfp)
                release_firmware(gpu->pfp);
        msm_gpu_cleanup(&gpu->base);
}