commit 3e2e2ab554

Setup and tear down xgmi as part of psp

v2:
- make psp_xgmi_terminate static
- squash in:
  drm/amdgpu: only issue xgmi cmd when it is enabled
  drm/amdgpu/psp: terminate xgmi ta in suspend and hw_fini phase

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
825 lines · 19 KiB · C
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"

static void psp_set_funcs(struct amdgpu_device *adev);

static int psp_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        psp_set_funcs(adev);

        return 0;
}

static int psp_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;
        int ret;

        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_VEGA12:
                psp_v3_1_set_psp_funcs(psp);
                break;
        case CHIP_RAVEN:
                psp_v10_0_set_psp_funcs(psp);
                break;
        case CHIP_VEGA20:
                psp_v11_0_set_psp_funcs(psp);
                break;
        default:
                return -EINVAL;
        }

        psp->adev = adev;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;

        ret = psp_init_microcode(psp);
        if (ret) {
                DRM_ERROR("Failed to load psp firmware!\n");
                return ret;
        }

        return 0;
}

static int psp_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;

        release_firmware(adev->psp.sos_fw);
        adev->psp.sos_fw = NULL;
        release_firmware(adev->psp.asd_fw);
        adev->psp.asd_fw = NULL;
        release_firmware(adev->psp.ta_fw);
        adev->psp.ta_fw = NULL;
        return 0;
}

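/*
 * Poll a PSP register until it matches the expected value under the given
 * mask, or, when check_changed is set, until it changes away from reg_val.
 * Returns -ETIME if adev->usec_timeout microseconds elapse first.
 */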
int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
                 uint32_t reg_val, uint32_t mask, bool check_changed)
{
        uint32_t val;
        int i;
        struct amdgpu_device *adev = psp->adev;

        for (i = 0; i < adev->usec_timeout; i++) {
                val = RREG32(reg_index);
                if (check_changed) {
                        if (val != reg_val)
                                return 0;
                } else {
                        if ((val & mask) == reg_val)
                                return 0;
                }
                udelay(1);
        }

        return -ETIME;
}

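/*
 * Copy a GFX command into the PSP command buffer, submit it to the PSP and
 * poll the fence buffer until the PSP has consumed it. When a ucode is
 * passed in, the response status is checked and the TMR address returned by
 * the PSP is recorded in the ucode info.
 */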
static int
psp_cmd_submit_buf(struct psp_context *psp,
                   struct amdgpu_firmware_info *ucode,
                   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
        int ret;
        int index;

        memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

        memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

        index = atomic_inc_return(&psp->fence_value);
        ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
                             fence_mc_addr, index);
        if (ret) {
                atomic_dec(&psp->fence_value);
                return ret;
        }

        while (*((unsigned int *)psp->fence_buf) != index)
                msleep(1);

        /* the status field must be 0 after FW is loaded */
        if (ucode && psp->cmd_buf_mem->resp.status) {
                DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n",
                          psp->cmd_buf_mem->resp.status, ucode->ucode_id);
                return -EINVAL;
        }

        if (ucode) {
                ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
                ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
        }

        return ret;
}

static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                                 uint64_t tmr_mc, uint32_t size)
{
        cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
        cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
        cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
        cmd->cmd.cmd_setup_tmr.buf_size = size;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
        int ret;

        /*
         * Allocate 3M memory aligned to 1M from Frame Buffer (local
         * physical).
         *
         * Note: this memory needs to be reserved until the driver is
         * uninitialized.
         */
        ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, 0x100000,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);

        return ret;
}

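/*
 * Ask the PSP to set up the Trusted Memory Region using the VRAM buffer
 * reserved by psp_tmr_init().
 */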
static int psp_tmr_load(struct psp_context *psp)
{
        int ret;
        struct psp_gfx_cmd_resp *cmd;

        cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
        DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
                 PSP_TMR_SIZE, psp->tmr_mc_addr);

        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
        if (ret)
                goto failed;

        kfree(cmd);

        return 0;

failed:
        kfree(cmd);
        return ret;
}

static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                                 uint64_t asd_mc, uint64_t asd_mc_shared,
                                 uint32_t size, uint32_t shared_size)
{
        cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
        cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
        cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
        cmd->cmd.cmd_load_ta.app_len = size;

        cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(asd_mc_shared);
        cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(asd_mc_shared);
        cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
}

static int psp_asd_init(struct psp_context *psp)
{
        int ret;

        /*
         * Allocate 16k memory aligned to 4k from Frame Buffer (local
         * physical) for shared ASD <-> Driver
         */
        ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                      &psp->asd_shared_bo,
                                      &psp->asd_shared_mc_addr,
                                      &psp->asd_shared_buf);

        return ret;
}

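/*
 * Copy the ASD firmware into the PSP private firmware buffer and ask the
 * PSP to load it, handing over the shared buffer allocated by psp_asd_init()
 * as the ASD <-> driver command buffer. Skipped under SR-IOV for now.
 */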
static int psp_asd_load(struct psp_context *psp)
{
        int ret;
        struct psp_gfx_cmd_resp *cmd;

        /* If the PSP version doesn't match the ASD version, ASD loading fails.
         * Add a workaround to bypass it for SR-IOV for now.
         * TODO: add a version check to make it common.
         */
        if (amdgpu_sriov_vf(psp->adev))
                return 0;

        cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        memset(psp->fw_pri_buf, 0, PSP_1_MEG);
        memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);

        psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr,
                             psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);

        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);

        kfree(cmd);

        return ret;
}

static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                                          uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
                                          uint32_t xgmi_ta_size, uint32_t shared_size)
{
        cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
        cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
        cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
        cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;

        cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
        cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
        cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
}

static int psp_xgmi_init_shared_buf(struct psp_context *psp)
{
        int ret;

        /*
         * Allocate 16k memory aligned to 4k from Frame Buffer (local
         * physical) for xgmi ta <-> Driver
         */
        ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                      &psp->xgmi_context.xgmi_shared_bo,
                                      &psp->xgmi_context.xgmi_shared_mc_addr,
                                      &psp->xgmi_context.xgmi_shared_buf);

        return ret;
}

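/*
 * Load the XGMI trusted application (TA): copy its ucode into the PSP
 * private firmware buffer, issue GFX_CMD_ID_LOAD_TA, and on success record
 * the session id and mark the xgmi context as initialized.
 */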
static int psp_xgmi_load(struct psp_context *psp)
{
        int ret;
        struct psp_gfx_cmd_resp *cmd;

        /*
         * TODO: bypass the loading in sriov for now
         */
        if (amdgpu_sriov_vf(psp->adev))
                return 0;

        cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        memset(psp->fw_pri_buf, 0, PSP_1_MEG);
        memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);

        psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
                                      psp->xgmi_context.xgmi_shared_mc_addr,
                                      psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);

        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);

        if (!ret) {
                psp->xgmi_context.initialized = 1;
                psp->xgmi_context.session_id = cmd->resp.session_id;
        }

        kfree(cmd);

        return ret;
}

static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                                            uint32_t xgmi_session_id)
{
        cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
        cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
}

static int psp_xgmi_unload(struct psp_context *psp)
{
        int ret;
        struct psp_gfx_cmd_resp *cmd;

        /*
         * TODO: bypass the unloading in sriov for now
         */
        if (amdgpu_sriov_vf(psp->adev))
                return 0;

        cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);

        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);

        kfree(cmd);

        return ret;
}

static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                                            uint32_t ta_cmd_id,
                                            uint32_t xgmi_session_id)
{
        cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
        cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
        cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
        /* Note: cmd_invoke_cmd.buf is not used for now */
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
        int ret;
        struct psp_gfx_cmd_resp *cmd;

        /*
         * TODO: bypass the loading in sriov for now
         */
        if (amdgpu_sriov_vf(psp->adev))
                return 0;

        cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
                                        psp->xgmi_context.session_id);

        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);

        kfree(cmd);

        return ret;
}

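/*
 * Tear down the XGMI TA session: unload the TA from the PSP, clear the
 * initialized flag and free the shared buffer. Called from the suspend and
 * hw_fini paths; a no-op if the session was never initialized.
 */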
static int psp_xgmi_terminate(struct psp_context *psp)
{
        int ret;

        if (!psp->xgmi_context.initialized)
                return 0;

        ret = psp_xgmi_unload(psp);
        if (ret)
                return ret;

        psp->xgmi_context.initialized = 0;

        /* free xgmi shared memory */
        amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
                              &psp->xgmi_context.xgmi_shared_mc_addr,
                              &psp->xgmi_context.xgmi_shared_buf);

        return 0;
}

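/*
 * Bring up the XGMI TA: allocate the shared buffer if needed, load the TA
 * and send it TA_COMMAND_XGMI__INITIALIZE to start a session.
 */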
static int psp_xgmi_initialize(struct psp_context *psp)
{
        struct ta_xgmi_shared_memory *xgmi_cmd;
        int ret;

        if (!psp->xgmi_context.initialized) {
                ret = psp_xgmi_init_shared_buf(psp);
                if (ret)
                        return ret;
        }

        /* Load XGMI TA */
        ret = psp_xgmi_load(psp);
        if (ret)
                return ret;

        /* Initialize XGMI session */
        xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
        xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

        ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);

        return ret;
}

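/*
 * Bring up the PSP: load the sysdrv and sos firmware (skipped for an SR-IOV
 * VF during GPU reset), create the KM ring, load the TMR and ASD, and set up
 * the XGMI TA session on multi-node XGMI configurations.
 */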
static int psp_hw_start(struct psp_context *psp)
{
        struct amdgpu_device *adev = psp->adev;
        int ret;

        if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
                ret = psp_bootloader_load_sysdrv(psp);
                if (ret)
                        return ret;

                ret = psp_bootloader_load_sos(psp);
                if (ret)
                        return ret;
        }

        ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
        if (ret)
                return ret;

        ret = psp_tmr_load(psp);
        if (ret)
                return ret;

        ret = psp_asd_load(psp);
        if (ret)
                return ret;

        if (adev->gmc.xgmi.num_physical_nodes > 1) {
                ret = psp_xgmi_initialize(psp);
                /* Warn on XGMI session initialization failure
                 * instead of stopping driver initialization.
                 */
                if (ret)
                        dev_err(psp->adev->dev,
                                "XGMI: Failed to initialize XGMI session\n");
        }
        return 0;
}

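/*
 * Load all non-PSP firmware images through the PSP, skipping images that are
 * not present, the SMC when the reload quirk applies, and images that an
 * SR-IOV VF must not load.
 */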
static int psp_np_fw_load(struct psp_context *psp)
{
        int i, ret;
        struct amdgpu_firmware_info *ucode;
        struct amdgpu_device *adev = psp->adev;

        for (i = 0; i < adev->firmware.max_ucodes; i++) {
                ucode = &adev->firmware.ucode[i];
                if (!ucode->fw)
                        continue;

                if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
                    psp_smu_reload_quirk(psp))
                        continue;
                if (amdgpu_sriov_vf(adev) &&
                    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
                     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
                     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
                        /* skip ucode loading in SRIOV VF */
                        continue;

                ret = psp_prep_cmd_buf(ucode, psp->cmd);
                if (ret)
                        return ret;

                ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
                                         psp->fence_buf_mc_addr);
                if (ret)
                        return ret;

#if 0
                /* check if firmware loaded successfully */
                if (!amdgpu_psp_check_fw_loading_status(adev, i))
                        return -EINVAL;
#endif
        }

        return 0;
}

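/*
 * Allocate the PSP private firmware, fence and command buffers, initialize
 * the KM ring, TMR and ASD, then start the PSP and load the non-PSP
 * firmware. For an SR-IOV VF in GPU reset the allocations already exist, so
 * only the hardware start and firmware load are repeated.
 */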
static int psp_load_fw(struct amdgpu_device *adev)
{
        int ret;
        struct psp_context *psp = &adev->psp;

        if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
                goto skip_memalloc;

        psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
        if (!psp->cmd)
                return -ENOMEM;

        ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
                                      AMDGPU_GEM_DOMAIN_GTT,
                                      &psp->fw_pri_bo,
                                      &psp->fw_pri_mc_addr,
                                      &psp->fw_pri_buf);
        if (ret)
                goto failed;

        ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &psp->fence_buf_bo,
                                      &psp->fence_buf_mc_addr,
                                      &psp->fence_buf);
        if (ret)
                goto failed_mem2;

        ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
                                      (void **)&psp->cmd_buf_mem);
        if (ret)
                goto failed_mem1;

        memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

        ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
        if (ret)
                goto failed_mem;

        ret = psp_tmr_init(psp);
        if (ret)
                goto failed_mem;

        ret = psp_asd_init(psp);
        if (ret)
                goto failed_mem;

skip_memalloc:
        ret = psp_hw_start(psp);
        if (ret)
                goto failed_mem;

        ret = psp_np_fw_load(psp);
        if (ret)
                goto failed_mem;

        return 0;

failed_mem:
        amdgpu_bo_free_kernel(&psp->cmd_buf_bo,
                              &psp->cmd_buf_mc_addr,
                              (void **)&psp->cmd_buf_mem);
failed_mem1:
        amdgpu_bo_free_kernel(&psp->fence_buf_bo,
                              &psp->fence_buf_mc_addr, &psp->fence_buf);
failed_mem2:
        amdgpu_bo_free_kernel(&psp->fw_pri_bo,
                              &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed:
        kfree(psp->cmd);
        psp->cmd = NULL;
        return ret;
}

static int psp_hw_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;

        mutex_lock(&adev->firmware.mutex);
        /*
         * This sequence is just used on hw_init only once, no need on
         * resume.
         */
        ret = amdgpu_ucode_init_bo(adev);
        if (ret)
                goto failed;

        ret = psp_load_fw(adev);
        if (ret) {
                DRM_ERROR("PSP firmware loading failed\n");
                goto failed;
        }

        mutex_unlock(&adev->firmware.mutex);
        return 0;

failed:
        adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
        mutex_unlock(&adev->firmware.mutex);
        return -EINVAL;
}

static int psp_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;

        if (adev->gmc.xgmi.num_physical_nodes > 1 &&
            psp->xgmi_context.initialized == 1)
                psp_xgmi_terminate(psp);

        psp_ring_destroy(psp, PSP_RING_TYPE__KM);

        amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
        amdgpu_bo_free_kernel(&psp->fw_pri_bo,
                              &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
        amdgpu_bo_free_kernel(&psp->fence_buf_bo,
                              &psp->fence_buf_mc_addr, &psp->fence_buf);
        amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr,
                              &psp->asd_shared_buf);
        amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
                              (void **)&psp->cmd_buf_mem);

        kfree(psp->cmd);
        psp->cmd = NULL;

        return 0;
}

static int psp_suspend(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;

        if (adev->gmc.xgmi.num_physical_nodes > 1 &&
            psp->xgmi_context.initialized == 1) {
                ret = psp_xgmi_terminate(psp);
                if (ret) {
                        DRM_ERROR("Failed to terminate xgmi ta\n");
                        return ret;
                }
        }

        ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
        if (ret) {
                DRM_ERROR("PSP ring stop failed\n");
                return ret;
        }

        return 0;
}

static int psp_resume(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;

        DRM_INFO("PSP is resuming...\n");

        mutex_lock(&adev->firmware.mutex);

        ret = psp_hw_start(psp);
        if (ret)
                goto failed;

        ret = psp_np_fw_load(psp);
        if (ret)
                goto failed;

        mutex_unlock(&adev->firmware.mutex);

        return 0;

failed:
        DRM_ERROR("PSP resume failed\n");
        mutex_unlock(&adev->firmware.mutex);
        return ret;
}

int psp_gpu_reset(struct amdgpu_device *adev)
{
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;

        return psp_mode1_reset(&adev->psp);
}

static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
                                        enum AMDGPU_UCODE_ID ucode_type)
{
        struct amdgpu_firmware_info *ucode = NULL;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_INFO("firmware is not loaded by PSP\n");
                return true;
        }

        if (!adev->firmware.fw_size)
                return false;

        ucode = &adev->firmware.ucode[ucode_type];
        if (!ucode->fw || !ucode->ucode_size)
                return false;

        return psp_compare_sram_data(&adev->psp, ucode, ucode_type);
}

static int psp_set_clockgating_state(void *handle,
                                     enum amd_clockgating_state state)
{
        return 0;
}

static int psp_set_powergating_state(void *handle,
                                     enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs psp_ip_funcs = {
        .name = "psp",
        .early_init = psp_early_init,
        .late_init = NULL,
        .sw_init = psp_sw_init,
        .sw_fini = psp_sw_fini,
        .hw_init = psp_hw_init,
        .hw_fini = psp_hw_fini,
        .suspend = psp_suspend,
        .resume = psp_resume,
        .is_idle = NULL,
        .check_soft_reset = NULL,
        .wait_for_idle = NULL,
        .soft_reset = NULL,
        .set_clockgating_state = psp_set_clockgating_state,
        .set_powergating_state = psp_set_powergating_state,
};

static const struct amdgpu_psp_funcs psp_funcs = {
        .check_fw_loading_status = psp_check_fw_loading_status,
};

static void psp_set_funcs(struct amdgpu_device *adev)
{
        if (NULL == adev->firmware.funcs)
                adev->firmware.funcs = &psp_funcs;
}

const struct amdgpu_ip_block_version psp_v3_1_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_PSP,
        .major = 3,
        .minor = 1,
        .rev = 0,
        .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_PSP,
        .major = 10,
        .minor = 0,
        .rev = 0,
        .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_PSP,
        .major = 11,
        .minor = 0,
        .rev = 0,
        .funcs = &psp_ip_funcs,
};