Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next
Just a few fixes for 4.15.

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux:
  drm/amd/amdgpu: Remove workaround for suspend/resume in uvd7
  drm/amdgpu: don't flush the TLB before initializing GART
  drm/amdgpu: minor cleanup for amdgpu_ttm_bind
  drm/amdgpu/psp: prevent page fault by checking write_frame address(v4)
  drm/amd/powerplay: retrieve the real-time coreClock values
  drm/amd/powerplay: fix performance drop on Vega10
  drm/amd/powerplay: add one smc message for Vega10
  drm/amd/powerplay: fix amd_powerplay_reset()
  amdgpu: add padding to the fence to handle ioctl.
  drm/amdgpu:fix wb_clear
  drm/amdgpu:fix vf_error_put
  drm/amdgpu/sriov:now must reinit psp
  drm/amdgpu: merge bios post checking functions
commit 43106e25ab
@@ -546,7 +546,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
 
 	if (offset < adev->wb.num_wb) {
 		__set_bit(offset, adev->wb.used);
-		*wb = offset * 8; /* convert to dw offset */
+		*wb = offset << 3; /* convert to dw offset */
 		return 0;
 	} else {
 		return -EINVAL;
@@ -564,7 +564,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
 {
 	if (wb < adev->wb.num_wb)
-		__clear_bit(wb, adev->wb.used);
+		__clear_bit(wb >> 3, adev->wb.used);
 }
 
 /**
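Both wb hunks are two halves of one invariant: amdgpu_wb_get() returns a dword offset (slot index << 3, since each writeback slot spans eight dwords), so amdgpu_wb_free() must shift that offset back to a slot index before clearing the allocation bitmap; clearing bit wb directly freed the wrong slot for every offset past zero. A minimal standalone sketch of that bookkeeping, as a userspace model rather than the kernel code (NUM_WB and the byte-per-slot used[] array are illustrative):

#include <assert.h>
#include <stdint.h>

#define NUM_WB 64                       /* illustrative slot count */

static uint8_t used[NUM_WB];            /* models the adev->wb.used bitmap */

static int wb_get(uint32_t *wb)
{
        for (uint32_t i = 0; i < NUM_WB; i++) {
                if (!used[i]) {
                        used[i] = 1;
                        *wb = i << 3;   /* slot index -> dword offset */
                        return 0;
                }
        }
        return -1;
}

static void wb_free(uint32_t wb)
{
        uint32_t slot = wb >> 3;        /* dword offset -> slot index */

        if (slot < NUM_WB)
                used[slot] = 0;
}

int main(void)
{
        uint32_t a, b;

        assert(wb_get(&a) == 0);        /* a == 0, slot 0 */
        assert(wb_get(&b) == 0);        /* b == 8, slot 1 */

        /* Freeing b must clear slot 1; clearing bit 8 instead (the old
         * behaviour) would have leaked slot 1 and clobbered slot 8. */
        wb_free(b);
        assert(used[0] == 1 && used[1] == 0);
        return 0;
}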
@@ -744,27 +744,6 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
 {
 	uint32_t reg;
 
-	if (adev->has_hw_reset) {
-		adev->has_hw_reset = false;
-		return true;
-	}
-
-	/* bios scratch used on CIK+ */
-	if (adev->asic_type >= CHIP_BONAIRE)
-		return amdgpu_atombios_scratch_need_asic_init(adev);
-
-	/* check MEM_SIZE for older asics */
-	reg = amdgpu_asic_get_config_memsize(adev);
-
-	if ((reg != 0) && (reg != 0xffffffff))
-		return false;
-
-	return true;
-
-}
-
-static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
-{
 	if (amdgpu_sriov_vf(adev))
 		return false;
 
@@ -787,7 +766,23 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
 			return true;
 		}
 	}
-	return amdgpu_need_post(adev);
+
+	if (adev->has_hw_reset) {
+		adev->has_hw_reset = false;
+		return true;
+	}
+
+	/* bios scratch used on CIK+ */
+	if (adev->asic_type >= CHIP_BONAIRE)
+		return amdgpu_atombios_scratch_need_asic_init(adev);
+
+	/* check MEM_SIZE for older asics */
+	reg = amdgpu_asic_get_config_memsize(adev);
+
+	if ((reg != 0) && (reg != 0xffffffff))
+		return false;
+
+	return true;
 }
 
 /**
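With the two hunks above, the body of amdgpu_vpost_needed() becomes amdgpu_need_post() itself, leaving a single function that evaluates the POST checks in a fixed order: SR-IOV virtual functions never post (the host already did), a one-shot has_hw_reset flag forces a post, CHIP_BONAIRE and newer consult the vBIOS scratch registers, and older ASICs fall back to the MEM_SIZE config register, where 0 or all-ones means the card was never posted. A compact standalone model of that decision chain (every predicate is a stub; the passthrough special cases in the elided context lines are skipped here):

#include <stdbool.h>
#include <stdint.h>

struct dev_state {                  /* stand-ins for the adev fields/queries */
        bool sriov_vf;
        bool has_hw_reset;
        bool asic_is_cik_or_newer;
        bool scratch_says_init;     /* amdgpu_atombios_scratch_need_asic_init() */
        uint32_t config_memsize;    /* amdgpu_asic_get_config_memsize() */
};

static bool need_post(struct dev_state *d)
{
        if (d->sriov_vf)
                return false;               /* host posted the card for us */

        if (d->has_hw_reset) {
                d->has_hw_reset = false;    /* one-shot flag */
                return true;
        }

        if (d->asic_is_cik_or_newer)
                return d->scratch_says_init;

        /* older asics: an unposted card reads 0 or all-ones from MEM_SIZE */
        return d->config_memsize == 0 || d->config_memsize == 0xffffffff;
}

int main(void)
{
        struct dev_state d = { .config_memsize = 0xffffffff };

        return need_post(&d) ? 0 : 1;   /* unposted old asic: POST needed */
}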
@@ -1951,6 +1946,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
 
 	static enum amd_ip_block_type ip_order[] = {
 		AMD_IP_BLOCK_TYPE_SMC,
+		AMD_IP_BLOCK_TYPE_PSP,
 		AMD_IP_BLOCK_TYPE_DCE,
 		AMD_IP_BLOCK_TYPE_GFX,
 		AMD_IP_BLOCK_TYPE_SDMA,
@@ -2036,12 +2032,17 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 
 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 {
-	if (adev->is_atom_fw) {
-		if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
-			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
-	} else {
-		if (amdgpu_atombios_has_gpu_virtualization_table(adev))
-			adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+	if (amdgpu_sriov_vf(adev)) {
+		if (adev->is_atom_fw) {
+			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
+				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+		} else {
+			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+		}
+
+		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
+			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
 	}
 }
 
@@ -2208,10 +2209,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	amdgpu_device_detect_sriov_bios(adev);
 
 	/* Post card if necessary */
-	if (amdgpu_vpost_needed(adev)) {
+	if (amdgpu_need_post(adev)) {
 		if (!adev->bios) {
 			dev_err(adev->dev, "no vBIOS found\n");
-			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
 			r = -EINVAL;
 			goto failed;
 		}
@@ -2219,7 +2219,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
 		if (r) {
 			dev_err(adev->dev, "gpu post error!\n");
-			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
 			goto failed;
 		}
 	} else {
@@ -3023,7 +3022,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 			}
 		} else {
 			dev_err(adev->dev, "asic resume failed (%d).\n", r);
-			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
 			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 				if (adev->rings[i] && adev->rings[i]->sched.thread) {
 					kthread_unpark(adev->rings[i]->sched.thread);
@@ -3037,7 +3035,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(adev->dev, "GPU reset failed\n");
-		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
 	}
 	else {
 		dev_info(adev->dev, "GPU reset successed!\n");
@@ -332,12 +332,13 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		adev->gart.pages[p] = pagelist[i];
 #endif
 
-	if (adev->gart.ptr) {
-		r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
-			    adev->gart.ptr);
-		if (r)
-			return r;
-	}
+	if (!adev->gart.ptr)
+		return 0;
+
+	r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+		    adev->gart.ptr);
+	if (r)
+		return r;
 
 	mb();
 	amdgpu_gart_flush_gpu_tlb(adev, 0);
@@ -909,7 +909,8 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
 	placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
-	placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
+	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
+		TTM_PL_FLAG_TT;
 
 	r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
 	if (unlikely(r))
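The amdgpu_ttm_bind change swaps a plain OR for clear-then-set: bo->mem.placement already carries a memory-domain flag, so OR-ing TTM_PL_FLAG_TT on top produced a placement naming two domains at once, which the minor-cleanup commit fixes by masking out TTM_PL_MASK_MEM first. The general idiom, as a standalone sketch (the mask and flag values below are made up for illustration, not TTM's real ones):

#include <assert.h>
#include <stdint.h>

#define PL_MASK_MEM     0x00ffu     /* illustrative: bits naming the domain */
#define PL_FLAG_VRAM    0x0001u
#define PL_FLAG_TT      0x0002u
#define PL_FLAG_CACHED  0x0100u     /* illustrative non-domain attribute */

int main(void)
{
        uint32_t placement = PL_FLAG_VRAM | PL_FLAG_CACHED;

        /* Buggy: the VRAM bit survives, so the result claims VRAM and TT. */
        uint32_t buggy = placement | PL_FLAG_TT;
        assert(buggy & PL_FLAG_VRAM);

        /* Fixed: drop old domain bits, keep attributes, then set TT. */
        uint32_t fixed = (placement & ~PL_MASK_MEM) | PL_FLAG_TT;
        assert(!(fixed & PL_FLAG_VRAM));
        assert((fixed & PL_FLAG_TT) && (fixed & PL_FLAG_CACHED));
        return 0;
}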
@@ -31,7 +31,12 @@ void amdgpu_vf_error_put(struct amdgpu_device *adev,
 		       uint64_t error_data)
 {
 	int index;
-	uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+	uint16_t error_code;
+
+	if (!amdgpu_sriov_vf(adev))
+		return;
+
+	error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
 
 	mutex_lock(&adev->virt.vf_errors.lock);
 	index = adev->virt.vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
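Two things change in amdgpu_vf_error_put(): it now returns immediately on bare-metal (non-SR-IOV) devices, and the error code is computed only after that guard has passed. The surrounding write_count % AMDGPU_VF_ERROR_ENTRY_SIZE arithmetic is a fixed-size circular log that keeps the newest entries; a standalone sketch of that pattern (capacity and entry layout are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define LOG_SIZE 4                      /* illustrative capacity */

struct vf_error_log {
        uint16_t code[LOG_SIZE];
        uint64_t data[LOG_SIZE];
        uint32_t write_count;           /* total puts; index wraps, count doesn't */
};

static void log_put(struct vf_error_log *log, uint16_t code, uint64_t data)
{
        uint32_t index = log->write_count % LOG_SIZE;   /* reuse oldest slot */

        log->code[index] = code;
        log->data[index] = data;
        log->write_count++;
}

int main(void)
{
        struct vf_error_log log = {0};

        for (uint16_t i = 0; i < 6; i++)        /* 6 puts overwrite the 2 oldest */
                log_put(&log, i, i * 100);
        printf("kept codes: %u %u %u %u\n",     /* prints: 4 5 2 3 */
               log.code[0], log.code[1], log.code[2], log.code[3]);
        return 0;
}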
@@ -257,6 +257,9 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
 	unsigned int psp_write_ptr_reg = 0;
 	struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
 	struct psp_ring *ring = &psp->km_ring;
+	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
 	struct amdgpu_device *adev = psp->adev;
 	uint32_t ring_size_dw = ring->ring_size / 4;
 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
@@ -266,9 +269,16 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
 
 	/* Update KM RB frame pointer to new frame */
 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
-		write_frame = ring->ring_mem;
+		write_frame = ring_buffer_start;
 	else
-		write_frame = ring->ring_mem + (psp_write_ptr_reg / rb_frame_size_dw);
+		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+	/* Check invalid write_frame ptr address */
+	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+			  ring_buffer_start, ring_buffer_end, write_frame);
+		DRM_ERROR("write_frame is pointing to address out of bounds\n");
+		return -EINVAL;
+	}
 
 	/* Initialize KM RB frame */
 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
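Both psp hunks, this pair for psp_v10_0 and the matching pair below for psp_v3_1, derive write_frame from a hardware-reported write pointer and then range-check it before the memset(), so a corrupted pointer register can no longer send the CPU writing outside the ring buffer. A standalone model of the pointer arithmetic and the new bounds check (frame layout and ring size are illustrative):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct rb_frame { uint32_t dw[8]; };    /* illustrative 32-byte frame */

#define RING_FRAMES 16
static struct rb_frame ring_mem[RING_FRAMES];

/* Returns the frame for write_ptr (in dwords), or NULL if out of bounds. */
static struct rb_frame *frame_for(uint32_t write_ptr)
{
        const uint32_t ring_size_dw = sizeof(ring_mem) / 4;
        const uint32_t frame_size_dw = sizeof(struct rb_frame) / 4;
        struct rb_frame *start = ring_mem;
        struct rb_frame *end = start + RING_FRAMES - 1; /* last valid frame */
        struct rb_frame *frame;

        if ((write_ptr % ring_size_dw) == 0)
                frame = start;          /* wrapped around: back to frame 0 */
        else
                frame = start + write_ptr / frame_size_dw;

        if (frame < start || end < frame)       /* the added sanity check */
                return NULL;
        return frame;
}

int main(void)
{
        assert(frame_for(0) == &ring_mem[0]);
        assert(frame_for(8) == &ring_mem[1]);
        assert(frame_for(128) == &ring_mem[0]); /* 128 dw == ring size: wrap */
        assert(frame_for(999) == NULL);         /* garbage pointer rejected */
        return 0;
}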
@@ -367,6 +367,9 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
 	unsigned int psp_write_ptr_reg = 0;
 	struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
 	struct psp_ring *ring = &psp->km_ring;
+	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
 	struct amdgpu_device *adev = psp->adev;
 	uint32_t ring_size_dw = ring->ring_size / 4;
 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
@@ -378,9 +381,16 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
 	/* write_frame ptr increments by size of rb_frame in bytes */
 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
-		write_frame = ring->ring_mem;
+		write_frame = ring_buffer_start;
 	else
-		write_frame = ring->ring_mem + (psp_write_ptr_reg / rb_frame_size_dw);
+		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+	/* Check invalid write_frame ptr address */
+	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+			  ring_buffer_start, ring_buffer_end, write_frame);
+		DRM_ERROR("write_frame is pointing to address out of bounds\n");
+		return -EINVAL;
+	}
 
 	/* Initialize KM RB frame */
 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
@@ -592,11 +592,7 @@ static int uvd_v7_0_suspend(void *handle)
 	if (r)
 		return r;
 
-	/* Skip this for APU for now */
-	if (!(adev->flags & AMD_IS_APU))
-		r = amdgpu_uvd_suspend(adev);
-
-	return r;
+	return amdgpu_uvd_suspend(adev);
 }
 
 static int uvd_v7_0_resume(void *handle)
@@ -604,12 +600,10 @@ static int uvd_v7_0_resume(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	/* Skip this for APU for now */
-	if (!(adev->flags & AMD_IS_APU)) {
-		r = amdgpu_uvd_resume(adev);
-		if (r)
-			return r;
-	}
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	return uvd_v7_0_hw_init(adev);
 }
 
@@ -1184,7 +1184,7 @@ int amd_powerplay_reset(void *handle)
 	int ret;
 
 	ret = pp_check(instance);
-	if (!ret)
+	if (ret)
 		return ret;
 
 	ret = pp_hw_fini(instance);
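The amd_powerplay_reset() fix is a one-character inversion of the usual 0-on-success convention: pp_check() returns 0 when the powerplay instance is valid, so the old "if (!ret) return ret;" bailed out precisely when everything was fine, returning 0 without resetting anything, and fell through on error. A toy illustration of the convention (the check and reset bodies are stand-ins):

#include <assert.h>
#include <errno.h>
#include <stddef.h>

static int pp_check(void *instance)     /* stand-in: 0 means valid */
{
        return instance ? 0 : -EINVAL;
}

static int reset(void *instance)
{
        int ret = pp_check(instance);

        if (ret)        /* fixed: propagate failure, continue on success */
                return ret;
        /* ... the actual reset work runs here ... */
        return 0;
}

int main(void)
{
        int dummy;

        assert(reset(&dummy) == 0);     /* valid instance: reset proceeds */
        assert(reset(NULL) == -EINVAL); /* invalid: error propagated */
        return 0;
}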
@@ -672,36 +672,20 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
 				PHM_PerformanceLevelDesignation designation, uint32_t index,
 				PHM_PerformanceLevel *level)
 {
-	const struct rv_power_state *ps;
 	struct rv_hwmgr *data;
-	uint32_t level_index;
-	uint32_t i;
-	uint32_t vol_dep_record_index = 0;
 
 	if (level == NULL || hwmgr == NULL || state == NULL)
 		return -EINVAL;
 
 	data = (struct rv_hwmgr *)(hwmgr->backend);
-	ps = cast_const_rv_ps(state);
 
-	level_index = index > ps->level - 1 ? ps->level - 1 : index;
-	level->coreClock = 30000;
-
-	if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
-		for (i = 1; i < ps->level; i++) {
-			if (ps->levels[i].engine_clock > data->dce_slow_sclk_threshold) {
-				level->coreClock = 30000;
-				break;
-			}
-		}
-	}
-
-	if (level_index == 0) {
-		vol_dep_record_index = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
-		level->memory_clock =
-			data->clock_vol_info.vdd_dep_on_fclk->entries[vol_dep_record_index].clk;
-	} else {
+	if (index == 0) {
 		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+		level->coreClock = data->gfx_min_freq_limit;
+	} else {
+		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
+			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
+		level->coreClock = data->gfx_max_freq_limit;
 	}
 
 	level->nonLocalMemoryFreq = 0;
@@ -2879,6 +2879,15 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 			"DPM is already running right , skipping re-enablement!",
 			return 0);
 
+	if ((data->smu_version == 0x001c2c00) ||
+	    (data->smu_version == 0x001c2d00)) {
+		tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
+		PP_ASSERT_WITH_CODE(!tmp_result,
+				"Failed to set package power PID!",
+				return tmp_result);
+	}
+
 	tmp_result = vega10_construct_voltage_tables(hwmgr);
 	PP_ASSERT_WITH_CODE(!tmp_result,
 		"Failed to contruct voltage tables!",
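The vega10 hunk sends the new UpdatePkgPwrPidAlpha message only on the two SMU firmware builds that implement it, keyed off the cached smu_version; on anything else the message id would be unknown to the firmware. A stripped-down sketch of that version-gating pattern (the message id and versions mirror the hunks; the send function is a stub):

#include <stdint.h>
#include <stdio.h>

#define MSG_UPDATE_PKG_PWR_PID_ALPHA 0x68   /* matches the header hunk below */

static int send_msg(uint32_t msg, uint32_t arg) /* models smum_send_msg_... */
{
        printf("smc msg 0x%02x arg %u\n", msg, arg);
        return 0;
}

static int enable_dpm_tasks(uint32_t smu_version)
{
        /* Only these two firmware builds understand the new message. */
        if (smu_version == 0x001c2c00 || smu_version == 0x001c2d00)
                return send_msg(MSG_UPDATE_PKG_PWR_PID_ALPHA, 1);
        return 0;       /* older firmware: skip silently */
}

int main(void)
{
        enable_dpm_tasks(0x001c2c00);   /* sends the message */
        enable_dpm_tasks(0x001c2b00);   /* skipped */
        return 0;
}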
@@ -131,7 +131,8 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_RunAcgInOpenLoop      0x5E
 #define PPSMC_MSG_InitializeAcg         0x5F
 #define PPSMC_MSG_GetCurrPkgPwr         0x61
-#define PPSMC_Message_Count             0x62
+#define PPSMC_MSG_UpdatePkgPwrPidAlpha  0x68
+#define PPSMC_Message_Count             0x69
 
 
 typedef int PPSMC_Msg;
@@ -553,6 +553,7 @@ union drm_amdgpu_fence_to_handle {
 	struct {
 		struct drm_amdgpu_fence fence;
 		__u32 what;
+		__u32 pad;
 	} in;
 	struct {
 		__u32 handle;
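The final uapi hunk pads the input half of union drm_amdgpu_fence_to_handle: struct drm_amdgpu_fence ends on a 64-bit boundary, and following it with a lone __u32 what leaves the struct size to the ABI's rounding rules, so 32-bit and 64-bit userspace could disagree with the kernel about the ioctl argument layout. An explicit pad pins the size everywhere. A standalone illustration (the fence fields mirror the uapi struct's shape, quoted from memory):

#include <assert.h>
#include <stdint.h>

struct fence {                  /* mirrors struct drm_amdgpu_fence */
        uint32_t ctx_id;
        uint32_t ip_type;
        uint32_t ip_instance;
        uint32_t ring;
        uint64_t seq_no;        /* the 64-bit member drives the alignment */
};

struct in_old { struct fence fence; uint32_t what; };
struct in_new { struct fence fence; uint32_t what; uint32_t pad; };

int main(void)
{
        /* On LP64, in_old is already rounded up to 32 bytes, but only by
         * implicit tail padding; on a typical 32-bit Linux ABI uint64_t is
         * 4-byte aligned and in_old shrinks to 28 bytes. in_new is 32
         * bytes with no hidden padding on either, which is the point. */
        static_assert(sizeof(struct in_new) ==
                      6 * sizeof(uint32_t) + sizeof(uint64_t),
                      "in_new has no implicit padding");
        return 0;
}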