Merge tag 'amd-drm-fixes-5.9-2020-08-07' of git://people.freedesktop.org/~agd5f/linux into drm-next

amd-drm-fixes-5.9-2020-08-07:

amdgpu:
- Re-add spelling typo fix
- Sienna Cichlid fixes
- Navy Flounder fixes
- DC fixes
- SMU i2c fix
- Power fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200807222843.3909-1-alexander.deucher@amd.com

commit 16e6eea29d

@@ -2574,6 +2574,9 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_IH,
};

for (i = 0; i < adev->num_ip_blocks; i++)
adev->ip_blocks[i].status.hw = false;

for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
int j;
struct amdgpu_ip_block *block;

@@ -2581,7 +2584,6 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
for (j = 0; j < adev->num_ip_blocks; j++) {
block = &adev->ip_blocks[j];

block->status.hw = false;
if (block->version->type != ip_order[i] ||
!block->status.valid)
continue;

@@ -3212,6 +3212,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;

/* Skip crit temp on APU */
if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;

/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||

@@ -193,12 +193,18 @@ static int psp_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

psp_memory_training_fini(&adev->psp);
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
if (adev->psp.sos_fw) {
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
}
if (adev->psp.asd_fw) {
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
}
if (adev->psp.ta_fw) {
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
}

if (adev->asic_type == CHIP_NAVI10)
psp_sysfs_fini(adev);

@@ -409,11 +415,28 @@ static int psp_clear_vf_fw(struct psp_context *psp)
return ret;
}

static bool psp_skip_tmr(struct psp_context *psp)
{
switch (psp->adev->asic_type) {
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
return true;
default:
return false;
}
}

static int psp_tmr_load(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;

/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
* Already set up by host driver.
*/
if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
return 0;

cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;

@@ -1987,7 +2010,7 @@ static int psp_suspend(void *handle)

ret = psp_tmr_terminate(psp);
if (ret) {
DRM_ERROR("Falied to terminate tmr\n");
DRM_ERROR("Failed to terminate tmr\n");
return ret;
}

@@ -1618,7 +1618,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
data = con->eh_data;
save_count = data->count - control->num_recs;
/* only new entries are saved */
if (save_count > 0)
if (save_count > 0) {
if (amdgpu_ras_eeprom_process_recods(control,
&data->bps[control->num_recs],
true,

@@ -1627,6 +1627,9 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
return -EIO;
}

dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
}

return 0;
}

@@ -3082,7 +3082,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),

@@ -3127,7 +3127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),

@@ -3158,7 +3158,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xffffffff, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

@@ -7529,6 +7529,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
amdgpu_gfx_off_ctrl(adev, enable);
break;
default:

@@ -49,12 +49,11 @@ static int jpeg_v3_0_set_powergating_state(void *handle,
static int jpeg_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->asic_type == CHIP_SIENNA_CICHLID) {
u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);

if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
return -ENOENT;

if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
return -ENOENT;
}
adev->jpeg.num_jpeg_inst = 1;

jpeg_v3_0_set_dec_ring_funcs(adev);

@@ -97,6 +97,49 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u64 r;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);

spin_lock_irqsave(&adev->pcie_idx_lock, flags);
/* read low 32 bit */
WREG32(address, reg);
(void)RREG32(address);
r = RREG32(data);

/* read high 32 bit*/
WREG32(address, reg + 4);
(void)RREG32(address);
r |= ((u64)RREG32(data) << 32);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
return r;
}

static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
unsigned long flags, address, data;

address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);

spin_lock_irqsave(&adev->pcie_idx_lock, flags);
/* write low 32 bit */
WREG32(address, reg);
(void)RREG32(address);
WREG32(data, (u32)(v & 0xffffffffULL));
(void)RREG32(data);

/* write high 32 bit */
WREG32(address, reg + 4);
(void)RREG32(address);
WREG32(data, (u32)(v >> 32));
(void)RREG32(data);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;

@@ -319,10 +362,15 @@ nv_asic_reset_method(struct amdgpu_device *adev)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);

if (smu_baco_is_support(smu))
return AMD_RESET_METHOD_BACO;
else
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
return AMD_RESET_METHOD_MODE1;
default:
if (smu_baco_is_support(smu))
return AMD_RESET_METHOD_BACO;
else
return AMD_RESET_METHOD_MODE1;
}
}

static int nv_asic_reset(struct amdgpu_device *adev)

@@ -673,6 +721,8 @@ static int nv_common_early_init(void *handle)
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
adev->pcie_wreg = &nv_pcie_wreg;
adev->pcie_rreg64 = &nv_pcie_rreg64;
adev->pcie_wreg64 = &nv_pcie_wreg64;

/* TODO: will add them during VCN v2 implementation */
adev->uvd_ctx_rreg = NULL;

@@ -1659,7 +1659,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.emit_ib = vcn_v2_0_dec_ring_emit_ib,
.emit_fence = vcn_v2_0_dec_ring_emit_fence,
.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
.test_ring = amdgpu_vcn_dec_ring_test_ring,
.test_ring = vcn_v2_0_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
.insert_nop = vcn_v2_0_dec_ring_insert_nop,
.insert_start = vcn_v2_0_dec_ring_insert_start,

@@ -97,6 +97,8 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"

@@ -1185,10 +1187,13 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
dmub_asic = DMUB_ASIC_DCN30;
fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
break;
case CHIP_NAVY_FLOUNDER:
dmub_asic = DMUB_ASIC_DCN30;
fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
break;
#endif

default:

@@ -8544,6 +8549,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;

/* Check connector changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

/* Skip connectors that are disabled or part of modeset already. */
if (!old_con_state->crtc && !new_con_state->crtc)
continue;

if (!new_con_state->crtc)
continue;

new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
if (IS_ERR(new_crtc_state)) {
ret = PTR_ERR(new_crtc_state);
goto fail;
}

if (dm_old_con_state->abm_level !=
dm_new_con_state->abm_level)
new_crtc_state->connectors_changed = true;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->asic_type >= CHIP_NAVI10) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

@@ -35,6 +35,7 @@
#include "dmub/dmub_srv.h"
#include "resource.h"
#include "dsc.h"
#include "dc_link_dp.h"

struct dmub_debugfs_trace_header {
uint32_t entry_count;

@@ -1150,7 +1151,7 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
return result;
}

static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,
static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;

@@ -1186,7 +1187,7 @@ static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,

snprintf(rd_buf_ptr, str_len,
"%d\n",
dsc_state.dsc_bytes_per_pixel);
dsc_state.dsc_bits_per_pixel);
rd_buf_ptr += str_len;

while (size) {

@@ -1460,9 +1461,9 @@ static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
.llseek = default_llseek
};

static const struct file_operations dp_dsc_bytes_per_pixel_debugfs_fops = {
static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_dsc_bytes_per_pixel_read,
.read = dp_dsc_bits_per_pixel_read,
.llseek = default_llseek
};

@@ -1552,7 +1553,7 @@ static const struct {
{"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
{"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
{"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
{"dsc_bytes_per_pixel", &dp_dsc_bytes_per_pixel_debugfs_fops},
{"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops},
{"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
{"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
{"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},

@@ -2834,6 +2834,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = bios_parser_destroy,

.get_board_layout_info = bios_get_board_layout_info,

.get_atom_dc_golden_table = NULL
};

static bool bios_parser_construct(

@@ -2079,6 +2079,85 @@ static uint16_t bios_parser_pack_data_tables(
return 0;
}

static struct atom_dc_golden_table_v1 *bios_get_golden_table(
struct bios_parser *bp,
uint32_t rev_major,
uint32_t rev_minor,
uint16_t *dc_golden_table_ver)
{
struct atom_display_controller_info_v4_4 *disp_cntl_tbl_4_4 = NULL;
uint32_t dc_golden_offset = 0;
*dc_golden_table_ver = 0;

if (!DATA_TABLES(dce_info))
return NULL;

/* ver.4.4 or higher */
switch (rev_major) {
case 4:
switch (rev_minor) {
case 4:
disp_cntl_tbl_4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4,
DATA_TABLES(dce_info));
if (!disp_cntl_tbl_4_4)
return NULL;
dc_golden_offset = DATA_TABLES(dce_info) + disp_cntl_tbl_4_4->dc_golden_table_offset;
*dc_golden_table_ver = disp_cntl_tbl_4_4->dc_golden_table_ver;
break;
}
break;
}

if (!dc_golden_offset)
return NULL;

if (*dc_golden_table_ver != 1)
return NULL;

return GET_IMAGE(struct atom_dc_golden_table_v1,
dc_golden_offset);
}

static enum bp_result bios_get_atom_dc_golden_table(
struct dc_bios *dcb)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
enum bp_result result = BP_RESULT_OK;
struct atom_dc_golden_table_v1 *atom_dc_golden_table = NULL;
struct atom_common_table_header *header;
struct atom_data_revision tbl_revision;
uint16_t dc_golden_table_ver = 0;

header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(dce_info));
if (!header)
return BP_RESULT_UNSUPPORTED;

get_atom_data_table_revision(header, &tbl_revision);

atom_dc_golden_table = bios_get_golden_table(bp,
tbl_revision.major,
tbl_revision.minor,
&dc_golden_table_ver);

if (!atom_dc_golden_table)
return BP_RESULT_UNSUPPORTED;

dcb->golden_table.dc_golden_table_ver = dc_golden_table_ver;
dcb->golden_table.aux_dphy_rx_control0_val = atom_dc_golden_table->aux_dphy_rx_control0_val;
dcb->golden_table.aux_dphy_rx_control1_val = atom_dc_golden_table->aux_dphy_rx_control1_val;
dcb->golden_table.aux_dphy_tx_control_val = atom_dc_golden_table->aux_dphy_tx_control_val;
dcb->golden_table.dc_gpio_aux_ctrl_0_val = atom_dc_golden_table->dc_gpio_aux_ctrl_0_val;
dcb->golden_table.dc_gpio_aux_ctrl_1_val = atom_dc_golden_table->dc_gpio_aux_ctrl_1_val;
dcb->golden_table.dc_gpio_aux_ctrl_2_val = atom_dc_golden_table->dc_gpio_aux_ctrl_2_val;
dcb->golden_table.dc_gpio_aux_ctrl_3_val = atom_dc_golden_table->dc_gpio_aux_ctrl_3_val;
dcb->golden_table.dc_gpio_aux_ctrl_4_val = atom_dc_golden_table->dc_gpio_aux_ctrl_4_val;
dcb->golden_table.dc_gpio_aux_ctrl_5_val = atom_dc_golden_table->dc_gpio_aux_ctrl_5_val;

return result;
}


static const struct dc_vbios_funcs vbios_funcs = {
.get_connectors_number = bios_parser_get_connectors_number,

@@ -2128,6 +2207,8 @@ static const struct dc_vbios_funcs vbios_funcs = {

.get_board_layout_info = bios_get_board_layout_info,
.pack_data_tables = bios_parser_pack_data_tables,

.get_atom_dc_golden_table = bios_get_atom_dc_golden_table
};

static bool bios_parser2_construct(

@@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru
return disp_clk_threshold;
}

static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
static void ramp_up_dispclk_with_dpp(
struct clk_mgr_internal *clk_mgr,
struct dc *dc,
struct dc_clocks *new_clocks,
bool safe_to_lower)
{
int i;
int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;

/* this function is to change dispclk, dppclk and dprefclk according to
* bandwidth requirement. Its call stack is rv1_update_clocks -->
* update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth
* --> prepare_bandwidth / optimize_bandwidth. before change dcn hw,
* prepare_bandwidth will be called first to allow enough clock,
* watermark for change, after end of dcn hw change, optimize_bandwidth
* is executed to lower clock to save power for new dcn hw settings.
*
* below is sequence of commit_planes_for_stream:
*
* step 1: prepare_bandwidth - raise clock to have enough bandwidth
* step 2: lock_doublebuffer_enable
* step 3: pipe_control_lock(true) - make dchubp register change will
* not take effect right way
* step 4: apply_ctx_for_surface - program dchubp
* step 5: pipe_control_lock(false) - dchubp register change take effect
* step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream
* for full_date, optimize clock to save power
*
* at end of step 1, dcn clocks (dprefclk, dispclk, dppclk) may be
* changed for new dchubp configuration. but real dcn hub dchubps are
* still running with old configuration until end of step 5. this need
* clocks settings at step 1 should not less than that before step 1.
* this is checked by two conditions: 1. if (should_set_clock(safe_to_lower
* , new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) ||
* new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz)
* 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz
*
* the second condition is based on new dchubp configuration. dppclk
* for new dchubp may be different from dppclk before step 1.
* for example, before step 1, dchubps are as below:
* pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979)
* pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080)
* for dppclk for pipe0 need dppclk = dispclk
*
* new dchubp pipe split configuration:
* pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080)
* pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080)
* dppclk only needs dppclk = dispclk /2.
*
* dispclk, dppclk are not lock by otg master lock. they take effect
* after step 1. during this transition, dispclk are the same, but
* dppclk is changed to half of previous clock for old dchubp
* configuration between step 1 and step 6. This may cause p-state
* warning intermittently.
*
* for new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we
* need make sure dppclk are not changed to less between step 1 and 6.
* for new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz,
* new display clock is raised, but we do not know ratio of
* new_clocks->dispclk_khz and clk_mgr_base->clks.dispclk_khz,
* new_clocks->dispclk_khz /2 does not guarantee equal or higher than
* old dppclk. we could ignore power saving different between
* dppclk = displck and dppclk = dispclk / 2 between step 1 and step 6.
* as long as safe_to_lower = false, set dpclk = dispclk to simplify
* condition check.
* todo: review this change for other asic.
**/
if (!safe_to_lower)
request_dpp_div = false;

/* set disp clk to dpp clk threshold */

clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);

@@ -209,7 +274,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
/* program dispclk on = as a w/a for sleep resume clock ramping issues */
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
|| new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
send_request_to_lower = true;
}

@@ -323,9 +323,10 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr);
/* always update dtos unless clock is lowered and not safe to lower */
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
* that we do not lower dto when it is not safe to lower. We do not need to
* compare the current and new dppclk before calling this function.*/
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}

@@ -1250,6 +1250,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
dc_allow_idle_optimizations(dc, false);
#endif

for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];

@@ -1838,6 +1841,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
int i;
enum surface_update_type overall_type = UPDATE_TYPE_FAST;

#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
if (dc->idle_optimizations_allowed)
overall_type = UPDATE_TYPE_FULL;

#endif
if (stream_status == NULL || stream_status->plane_count != surface_count)
overall_type = UPDATE_TYPE_FULL;

@@ -2306,8 +2314,14 @@ static void commit_planes_for_stream(struct dc *dc,
}
}

if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
dc->hwss.prepare_bandwidth(dc, context);
if (update_type == UPDATE_TYPE_FULL) {
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
dc_allow_idle_optimizations(dc, false);

#endif
if (dc->optimize_seamless_boot_streams == 0)
dc->hwss.prepare_bandwidth(dc, context);

context_clock_trace(dc, context);
}

@@ -1540,6 +1540,9 @@ static bool dc_link_construct(struct dc_link *link,
}
}

if (bios->funcs->get_atom_dc_golden_table)
bios->funcs->get_atom_dc_golden_table(bios);

/*
* TODO check if GPIO programmed correctly
*

@@ -3102,6 +3105,9 @@ void core_link_enable_stream(
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
#endif
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);

if (!IS_DIAG_DC(dc->ctx->dce_environment) &&

@@ -3136,8 +3142,8 @@ void core_link_enable_stream(
pipe_ctx->stream->link->link_state_valid = true;

#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
#endif

if (dc_is_dvi_signal(pipe_ctx->stream->signal))

@@ -3276,7 +3282,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;

if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
core_link_set_avmute(pipe_ctx, true);
}

@@ -1133,6 +1133,44 @@ static inline enum link_training_result perform_link_training_int(
return status;
}

static enum link_training_result check_link_loss_status(
struct dc_link *link,
const struct link_training_settings *link_training_setting)
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
union lane_status lane_status;
uint8_t dpcd_buf[6] = {0};
uint32_t lane;

core_link_read_dpcd(
link,
DP_SINK_COUNT,
(uint8_t *)(dpcd_buf),
sizeof(dpcd_buf));

/*parse lane status*/
for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
/*
* check lanes status
*/
lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane);

if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
!lane_status.bits.CR_DONE_0 ||
!lane_status.bits.SYMBOL_LOCKED_0) {
/* if one of the channel equalization, clock
* recovery or symbol lock is dropped
* consider it as (link has been
* dropped) dp sink status has changed
*/
status = LINK_TRAINING_LINK_LOSS;
break;
}
}

return status;
}

static void initialize_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_setting,

@@ -1372,6 +1410,9 @@ static void print_status_message(
case LINK_TRAINING_LQA_FAIL:
lt_result = "LQA failed";
break;
case LINK_TRAINING_LINK_LOSS:
lt_result = "Link loss";
break;
default:
break;
}

@@ -1531,6 +1572,14 @@ enum link_training_result dc_link_dp_perform_link_training(
status);
}

/* delay 5ms after Main Link output idle pattern and then check
* DPCD 0202h.
*/
if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
msleep(5);
status = check_link_loss_status(link, &lt_settings);
}

/* 6. print status message*/
print_status_message(link, &lt_settings, status);

@@ -4290,22 +4339,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)

void dpcd_set_source_specific_data(struct dc_link *link)
{
uint8_t dspc = 0;
enum dc_status ret;

ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
sizeof(dspc));

if (ret != DC_OK) {
DC_LOG_ERROR("Error in DP aux read transaction,"
" not writing source specific data\n");
return;
}

/* Return if OUI unsupported */
if (!(dspc & DP_OUI_SUPPORT))
return;

if (!link->dc->vendor_signature.is_valid) {
struct dpcd_amd_signature amd_signature;
amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;

@@ -246,20 +246,18 @@ struct dc_stream_status *dc_stream_get_status(

#ifndef TRIM_FSFT
/**
* dc_optimize_timing() - dc to optimize timing
* dc_optimize_timing_for_fsft() - dc to optimize timing
*/
bool dc_optimize_timing(
struct dc_crtc_timing *timing,
bool dc_optimize_timing_for_fsft(
struct dc_stream_state *pStream,
unsigned int max_input_rate_in_khz)
{
//optimization is expected to assing a value to these:
//timing->pix_clk_100hz
//timing->v_front_porch
//timing->v_total
//timing->fast_transport_output_rate_100hz;
timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
struct dc *dc;

return true;
dc = pStream->ctx->dc;

return (dc->hwss.optimize_timing_for_fsft &&
dc->hwss.optimize_timing_for_fsft(dc, &pStream->timing, max_input_rate_in_khz));
}
#endif

@@ -133,6 +133,9 @@ struct dc_vbios_funcs {
uint16_t (*pack_data_tables)(
struct dc_bios *dcb,
void *dst);

enum bp_result (*get_atom_dc_golden_table)(
struct dc_bios *dcb);
};

struct bios_registers {

@@ -154,6 +157,7 @@ struct dc_bios {
struct dc_firmware_info fw_info;
bool fw_info_valid;
struct dc_vram_info vram_info;
struct dc_golden_table golden_table;
};

#endif /* DC_BIOS_TYPES_H */

@@ -424,8 +424,8 @@ struct dc_stream_status *dc_stream_get_status(
struct dc_stream_state *dc_stream);

#ifndef TRIM_FSFT
bool dc_optimize_timing(
struct dc_crtc_timing *timing,
bool dc_optimize_timing_for_fsft(
struct dc_stream_state *pStream,
unsigned int max_input_rate_in_khz);
#endif

@@ -890,6 +890,20 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};

struct dc_golden_table {
uint16_t dc_golden_table_ver;
uint32_t aux_dphy_rx_control0_val;
uint32_t aux_dphy_tx_control_val;
uint32_t aux_dphy_rx_control1_val;
uint32_t dc_gpio_aux_ctrl_0_val;
uint32_t dc_gpio_aux_ctrl_1_val;
uint32_t dc_gpio_aux_ctrl_2_val;
uint32_t dc_gpio_aux_ctrl_3_val;
uint32_t dc_gpio_aux_ctrl_4_val;
uint32_t dc_gpio_aux_ctrl_5_val;
};


#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
enum dc_gpu_mem_alloc_type {
DC_MEM_ALLOC_TYPE_GART,

@@ -38,7 +38,8 @@

#define AUX_REG_LIST(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id)

#define HPD_REG_LIST(id)\
SRI(DC_HPD_CONTROL, HPD, id)

@@ -107,6 +108,7 @@
struct dce110_link_enc_aux_registers {
uint32_t AUX_CONTROL;
uint32_t AUX_DPHY_RX_CONTROL0;
uint32_t AUX_DPHY_RX_CONTROL1;
};

struct dce110_link_enc_hpd_registers {

@@ -233,8 +233,8 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
true : false;
copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline;
copy_settings_data->debug.bitfields.use_hw_lock_mgr = 0;

dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);

@@ -390,6 +390,8 @@ void dcn10_log_hw_state(struct dc *dc,
}
DTN_INFO("\n");

// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
// TODO: Update golden log header to reflect this name change
DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
for (i = 0; i < pool->res_cap->num_dsc; i++) {
struct display_stream_compressor *dsc = pool->dscs[i];

@@ -400,7 +402,7 @@ void dcn10_log_hw_state(struct dc *dc,
dsc->inst,
s.dsc_clock_en,
s.dsc_slice_width,
s.dsc_bytes_per_pixel);
s.dsc_bits_per_pixel);
DTN_INFO("\n");
}
DTN_INFO("\n");

@@ -31,10 +31,10 @@
#define TO_DCN10_LINK_ENC(link_encoder)\
container_of(link_encoder, struct dcn10_link_encoder, base)


#define AUX_REG_LIST(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id)

#define HPD_REG_LIST(id)\
SRI(DC_HPD_CONTROL, HPD, id)

@@ -73,6 +73,7 @@ struct dcn10_link_enc_aux_registers {
uint32_t AUX_CONTROL;
uint32_t AUX_DPHY_RX_CONTROL0;
uint32_t AUX_DPHY_TX_CONTROL;
uint32_t AUX_DPHY_RX_CONTROL1;
};

struct dcn10_link_enc_hpd_registers {

@@ -443,7 +444,10 @@ struct dcn10_link_enc_registers {
type AUX_TX_PRECHARGE_LEN; \
type AUX_TX_PRECHARGE_SYMBOLS; \
type AUX_MODE_DET_CHECK_DELAY;\
type DPCS_DBG_CBUS_DIS
type DPCS_DBG_CBUS_DIS;\
type AUX_RX_PRECHARGE_SKIP;\
type AUX_RX_TIMEOUT_LEN;\
type AUX_RX_TIMEOUT_LEN_MUL

struct dcn10_link_enc_shift {
DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);

@@ -156,7 +156,7 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds

REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bytes_per_pixel);
REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel);
REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);

@@ -2498,3 +2498,30 @@ void dcn20_fpga_init_hw(struct dc *dc)
tg->funcs->tg_init(tg);
}
}
#ifndef TRIM_FSFT
bool dcn20_optimize_timing_for_fsft(struct dc *dc,
struct dc_crtc_timing *timing,
unsigned int max_input_rate_in_khz)
{
unsigned int old_v_front_porch;
unsigned int old_v_total;
unsigned int max_input_rate_in_100hz;
unsigned long long new_v_total;

max_input_rate_in_100hz = max_input_rate_in_khz * 10;
if (max_input_rate_in_100hz < timing->pix_clk_100hz)
return false;

old_v_total = timing->v_total;
old_v_front_porch = timing->v_front_porch;

timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
timing->pix_clk_100hz = max_input_rate_in_100hz;

new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->pix_clk_100hz);

timing->v_total = new_v_total;
timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total);
return true;
}
#endif

@@ -132,5 +132,10 @@ int dcn20_init_sys_ctx(struct dce_hwseq *hws,
struct dc *dc,
struct dc_phy_addr_space_config *pa_config);

#ifndef TRIM_FSFT
bool dcn20_optimize_timing_for_fsft(struct dc *dc,
struct dc_crtc_timing *timing,
unsigned int max_input_rate_in_khz);
#endif
#endif /* __DC_HWSS_DCN20_H__ */

@@ -88,6 +88,9 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
};

static const struct hwseq_private_funcs dcn20_private_funcs = {

@@ -309,7 +309,6 @@ bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
void enc2_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);

/*
00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4

@@ -333,9 +332,18 @@ void enc2_hw_init(struct link_encoder *enc)
AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
AUX_RX_DETECTION_THRESHOLD [30:28] = 1
*/
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
if (enc->ctx->dc_bios->golden_table.dc_golden_table_ver > 0) {
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control0_val);

AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, enc->ctx->dc_bios->golden_table.aux_dphy_tx_control_val);

AUX_REG_WRITE(AUX_DPHY_RX_CONTROL1, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control1_val);
} else {
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);

AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);

}

//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk

@@ -191,7 +191,10 @@
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_DETECTION_THRESHOLD, mask_sh), \
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_LEN, mask_sh),\
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_SYMBOLS, mask_sh),\
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh)
LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh),\
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_PRECHARGE_SKIP, mask_sh),\
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)

#define UNIPHY_DCN2_REG_LIST(id) \
SRI(CLOCK_ENABLE, SYMCLK, id), \

@@ -2223,7 +2223,7 @@ int dcn20_populate_dml_pipes_from_context(
if (!res_ctx->pipe_ctx[i].plane_state) {
pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_4kb_s;
pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)

@@ -2235,7 +2235,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height;
pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256;
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/

@@ -3069,8 +3069,7 @@ void dcn20_calculate_dlg_params(
int pipe_cnt,
int vlevel)
{
int i, j, pipe_idx, pipe_idx_unsplit;
bool visited[MAX_PIPES] = { 0 };
int i, pipe_idx;

/* Writeback MCIF_WB arbitration parameters */
dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);

@@ -3089,55 +3088,17 @@ void dcn20_calculate_dlg_params(
if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;

/*
* An artifact of dml pipe split/odm is that pipes get merged back together for
* calculation. Therefore we need to only extract for first pipe in ascending index order
* and copy into the other split half.
*/
for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;

if (!visited[pipe_idx]) {
display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;

dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
/*
* j iterates inside pipes array, unlike i which iterates inside
* pipe_ctx array
*/
if (src->is_hsplit)
for (j = pipe_idx + 1; j < pipe_cnt; j++) {
display_pipe_source_params_st *src_j = &pipes[j].pipe.src;
display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest;

if (src_j->is_hsplit && !visited[j]
&& src->hsplit_grp == src_j->hsplit_grp) {
dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
visited[j] = true;
}
}
visited[pipe_idx] = true;
pipe_idx_unsplit++;
}
pipe_idx++;
}

for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
ASSERT(visited[pipe_idx]);
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
pipe_idx++;
}

@@ -92,6 +92,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
};

static const struct hwseq_private_funcs dcn21_private_funcs = {

@@ -62,7 +62,7 @@ static const struct link_encoder_funcs dcn30_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
dcn30_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
.hw_init = enc3_hw_init,
.setup = dcn10_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
.enable_dp_output = dcn20_link_encoder_enable_dp_output,

@@ -203,3 +203,54 @@ void dcn30_link_encoder_construct(
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
}

#define AUX_REG(reg)\
(enc10->aux_regs->reg)

#define AUX_REG_READ(reg_name) \
dm_read_reg(CTX, AUX_REG(reg_name))

#define AUX_REG_WRITE(reg_name, val) \
dm_write_reg(CTX, AUX_REG(reg_name), val)
void enc3_hw_init(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);

/*
00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8
03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16
04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32
05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64
06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128
07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256
*/

/*
AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0,
AUX_RX_START_WINDOW = 1 [6:4]
AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8]
AUX_RX_HALF_SYM_DETECT_LEN = 1 [13:12] default is 1
AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1
AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0 default is 0
AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1 default is 1
AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1 default is 1
AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
AUX_RX_DETECTION_THRESHOLD [30:28] = 1
*/
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);

AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);

//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
// 27MHz -> 0xd
// 100MHz -> 0x32
// 48MHz -> 0x18

// Set TMDS_CTL0 to 1. This is a legacy setting.
REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1);

dcn10_aux_initialize(enc10);
}

@@ -73,4 +73,6 @@ void dcn30_link_encoder_construct(
const struct dcn10_link_enc_shift *link_shift,
const struct dcn10_link_enc_mask *link_mask);

void enc3_hw_init(struct link_encoder *enc);

#endif /* __DC_LINK_ENCODER__DCN30_H__ */

@@ -26,6 +26,7 @@
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "dcn30_hwseq.h"

static const struct hw_sequencer_funcs dcn30_funcs = {

@@ -87,8 +88,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
};

static const struct hwseq_private_funcs dcn30_private_funcs = {

@@ -154,23 +154,11 @@ dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimeP
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip);
dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip);

dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup);
dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix);
dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix);
dml_get_pipe_attr_func(vready_offset, mode_lib->vba.VReadyOffsetPix);

unsigned int get_vstartup_calculated(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes,
unsigned int which_pipe)
{
unsigned int which_plane;

recalculate_params(mode_lib, pipes, num_pipes);
which_plane = mode_lib->vba.pipe_plane[which_pipe];
return mode_lib->vba.VStartup[which_plane];
}

double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,

@@ -479,7 +467,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =
1;
mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;;
mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
dout->dsc_slices;
mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =

@@ -98,16 +98,11 @@ dml_get_pipe_attr_decl(refcyc_per_meta_chunk_vblank_c_in_us);
dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_l_in_us);
dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_c_in_us);

dml_get_pipe_attr_decl(vstartup);
dml_get_pipe_attr_decl(vupdate_offset);
dml_get_pipe_attr_decl(vupdate_width);
dml_get_pipe_attr_decl(vready_offset);

unsigned int get_vstartup_calculated(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,
unsigned int num_pipes,
unsigned int which_pipe);

double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
const display_e2e_pipe_params_st *pipes,

@@ -71,8 +71,9 @@ enum dentist_divider_range {

#define CTX \
clk_mgr->base.ctx

#define DC_LOGGER \
clk_mgr->ctx->logger
clk_mgr->base.ctx->logger

@@ -55,7 +55,7 @@ struct dsc_optc_config {
struct dcn_dsc_state {
uint32_t dsc_clock_en;
uint32_t dsc_slice_width;
uint32_t dsc_bytes_per_pixel;
uint32_t dsc_bits_per_pixel;
uint32_t dsc_slice_height;
uint32_t dsc_pic_width;
uint32_t dsc_pic_height;

@@ -116,6 +116,11 @@ struct hw_sequencer_funcs {
void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
int num_pipes,
const struct dc_static_screen_params *events);
#ifndef TRIM_FSFT
bool (*optimize_timing_for_fsft)(struct dc *dc,
struct dc_crtc_timing *timing,
unsigned int max_input_rate_in_khz);
#endif

/* Stream Related */
void (*enable_stream)(struct pipe_ctx *pipe_ctx);

@@ -66,6 +66,8 @@ enum link_training_result {
/* other failure during EQ step */
LINK_TRAINING_EQ_FAIL_EQ,
LINK_TRAINING_LQA_FAIL,
/* one of the CR,EQ or symbol lock is dropped */
LINK_TRAINING_LINK_LOSS,
};

struct link_training_settings {

@@ -829,10 +829,13 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
switch (packet_type) {
case PACKET_TYPE_FS_V3:
#ifndef TRIM_FSFT
// always populate with pixel rate.
build_vrr_infopacket_v3(
stream->signal, vrr,
stream->timing.flags.FAST_TRANSPORT,
stream->timing.fast_transport_output_rate_100hz,
(stream->timing.flags.FAST_TRANSPORT) ?
stream->timing.fast_transport_output_rate_100hz :
stream->timing.pix_clk_100hz,
app_tf, infopacket);
#else
build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
@ -941,7 +941,6 @@ struct atom_display_controller_info_v4_1
|
||||
uint8_t reserved3[8];
|
||||
};
|
||||
|
||||
|
||||
struct atom_display_controller_info_v4_2
|
||||
{
|
||||
struct atom_common_table_header table_header;
|
||||
@ -976,6 +975,59 @@ struct atom_display_controller_info_v4_2
|
||||
uint8_t reserved3[8];
|
||||
};
|
||||
|
||||
struct atom_display_controller_info_v4_4 {
|
||||
struct atom_common_table_header table_header;
|
||||
uint32_t display_caps;
|
||||
uint32_t bootup_dispclk_10khz;
|
||||
uint16_t dce_refclk_10khz;
|
||||
uint16_t i2c_engine_refclk_10khz;
|
||||
uint16_t dvi_ss_percentage; // in unit of 0.001%
|
||||
uint16_t dvi_ss_rate_10hz;
|
||||
uint16_t hdmi_ss_percentage; // in unit of 0.001%
|
||||
uint16_t hdmi_ss_rate_10hz;
|
||||
uint16_t dp_ss_percentage; // in unit of 0.001%
|
||||
uint16_t dp_ss_rate_10hz;
|
||||
uint8_t dvi_ss_mode; // enum of atom_spread_spectrum_mode
|
||||
uint8_t hdmi_ss_mode; // enum of atom_spread_spectrum_mode
|
||||
uint8_t dp_ss_mode; // enum of atom_spread_spectrum_mode
|
||||
uint8_t ss_reserved;
|
||||
uint8_t dfp_hardcode_mode_num; // DFP hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
|
||||
uint8_t dfp_hardcode_refreshrate;// DFP hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available
|
||||
uint8_t vga_hardcode_mode_num; // VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not avablable
|
||||
uint8_t vga_hardcode_refreshrate;// VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not avablable
|
||||
uint16_t dpphy_refclk_10khz;
|
||||
uint16_t hw_chip_id;
|
||||
uint8_t dcnip_min_ver;
|
||||
uint8_t dcnip_max_ver;
|
||||
uint8_t max_disp_pipe_num;
|
||||
uint8_t max_vbios_active_disp_pipum;
|
||||
uint8_t max_ppll_num;
|
||||
uint8_t max_disp_phy_num;
|
||||
uint8_t max_aux_pairs;
|
||||
uint8_t remotedisplayconfig;
|
||||
uint32_t dispclk_pll_vco_freq;
|
||||
uint32_t dp_ref_clk_freq;
|
||||
uint32_t max_mclk_chg_lat; // Worst case blackout duration for a memory clock frequency (p-state) change, units of 100s of ns (0.1 us)
|
||||
uint32_t max_sr_exit_lat; // Worst case memory self refresh exit time, units of 100ns of ns (0.1us)
|
||||
uint32_t max_sr_enter_exit_lat; // Worst case memory self refresh entry followed by immediate exit time, units of 100ns of ns (0.1us)
|
||||
uint16_t dc_golden_table_offset; // point of struct of atom_dc_golden_table_vxx
|
||||
uint16_t dc_golden_table_ver;
|
||||
uint32_t reserved3[3];
|
||||
};
|
||||
|
||||
struct atom_dc_golden_table_v1
{
uint32_t aux_dphy_rx_control0_val;
uint32_t aux_dphy_tx_control_val;
uint32_t aux_dphy_rx_control1_val;
uint32_t dc_gpio_aux_ctrl_0_val;
uint32_t dc_gpio_aux_ctrl_1_val;
uint32_t dc_gpio_aux_ctrl_2_val;
uint32_t dc_gpio_aux_ctrl_3_val;
uint32_t dc_gpio_aux_ctrl_4_val;
uint32_t dc_gpio_aux_ctrl_5_val;
uint32_t reserved[23];
};

enum dce_info_caps_def
{
@ -133,6 +133,78 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
return ret;
}

static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
bool enable)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;

if (!smu->ppt_funcs->dpm_set_vcn_enable)
return 0;

if (atomic_read(&power_gate->vcn_gated) ^ enable)
return 0;

ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
if (!ret)
atomic_set(&power_gate->vcn_gated, !enable);

return ret;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
bool enable)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;

mutex_lock(&power_gate->vcn_gate_lock);

ret = smu_dpm_set_vcn_enable_locked(smu, enable);

mutex_unlock(&power_gate->vcn_gate_lock);

return ret;
}

static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
bool enable)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;

if (!smu->ppt_funcs->dpm_set_jpeg_enable)
return 0;

if (atomic_read(&power_gate->jpeg_gated) ^ enable)
return 0;

ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
if (!ret)
atomic_set(&power_gate->jpeg_gated, !enable);

return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
bool enable)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;

mutex_lock(&power_gate->jpeg_gate_lock);

ret = smu_dpm_set_jpeg_enable_locked(smu, enable);

mutex_unlock(&power_gate->jpeg_gate_lock);

return ret;
}

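The VCN and JPEG paths above share one shape: a *_locked helper that assumes the caller already holds the per-block gate mutex and only calls the ASIC-specific ppt_funcs hook when the cached gate state actually changes, plus a thin public wrapper that takes and releases that mutex. What follows is a minimal user-space sketch of the same split, using pthreads and <stdatomic.h> in place of the kernel's mutex and atomic_t; the names gate_ctx and hw_set_power are invented for illustration and are not driver symbols.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct gate_ctx {
	atomic_int gated;               /* 1 = block is power gated (off) */
	pthread_mutex_t gate_lock;      /* serializes gate transitions */
};

/* Stand-in for the ASIC hook (ppt_funcs->dpm_set_vcn/jpeg_enable). */
static int hw_set_power(bool enable) { (void)enable; return 0; }

/* Caller must hold gate_lock; does nothing if the state already matches. */
static int gate_set_enable_locked(struct gate_ctx *g, bool enable)
{
	int ret;

	if (atomic_load(&g->gated) ^ enable)    /* requested state already set */
		return 0;

	ret = hw_set_power(enable);
	if (!ret)
		atomic_store(&g->gated, !enable);
	return ret;
}

/* Public entry point: take the lock, delegate, release the lock. */
static int gate_set_enable(struct gate_ctx *g, bool enable)
{
	pthread_mutex_lock(&g->gate_lock);
	int ret = gate_set_enable_locked(g, enable);
	pthread_mutex_unlock(&g->gate_lock);
	return ret;
}
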
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
@ -353,6 +425,45 @@ static int smu_early_init(void *handle)
return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
int vcn_gate, jpeg_gate;
int ret = 0;

if (!smu->ppt_funcs->set_default_dpm_table)
return 0;

mutex_lock(&power_gate->vcn_gate_lock);
mutex_lock(&power_gate->jpeg_gate_lock);

vcn_gate = atomic_read(&power_gate->vcn_gated);
jpeg_gate = atomic_read(&power_gate->jpeg_gated);

ret = smu_dpm_set_vcn_enable_locked(smu, true);
if (ret)
goto err0_out;

ret = smu_dpm_set_jpeg_enable_locked(smu, true);
if (ret)
goto err1_out;

ret = smu->ppt_funcs->set_default_dpm_table(smu);
if (ret)
dev_err(smu->adev->dev,
"Failed to setup default dpm clock tables!\n");

smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
err1_out:
smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
err0_out:
mutex_unlock(&power_gate->jpeg_gate_lock);
mutex_unlock(&power_gate->vcn_gate_lock);

return ret;
}

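smu_set_default_dpm_table() needs VCN and JPEG ungated while the default clock tables are read back, so it snapshots the current gate state under both locks, force-enables the blocks, performs the table setup, and then restores whatever state it started with, unwinding in reverse order when an enable fails. A compressed sketch of that save/enable/restore pattern, reusing the gate_ctx helpers from the sketch above (with_blocks_ungated and op are illustrative names, not driver symbols):

/* Run 'op' with both blocks ungated, then restore their previous state. */
static int with_blocks_ungated(struct gate_ctx *vcn, struct gate_ctx *jpeg,
			       int (*op)(void))
{
	int vcn_was_gated, jpeg_was_gated;
	int ret;

	pthread_mutex_lock(&vcn->gate_lock);
	pthread_mutex_lock(&jpeg->gate_lock);

	vcn_was_gated = atomic_load(&vcn->gated);
	jpeg_was_gated = atomic_load(&jpeg->gated);

	ret = gate_set_enable_locked(vcn, true);
	if (ret)
		goto out_unlock;
	ret = gate_set_enable_locked(jpeg, true);
	if (ret)
		goto out_restore_vcn;

	ret = op();                     /* e.g. read back the DPM tables */

	gate_set_enable_locked(jpeg, !jpeg_was_gated);
out_restore_vcn:
	gate_set_enable_locked(vcn, !vcn_was_gated);
out_unlock:
	pthread_mutex_unlock(&jpeg->gate_lock);
	pthread_mutex_unlock(&vcn->gate_lock);
	return ret;
}
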
static int smu_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -579,6 +690,10 @@ static int smu_smc_table_sw_init(struct smu_context *smu)
if (ret)
return ret;

ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
if (ret)
return ret;

return 0;
}

@ -586,6 +701,8 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
{
int ret;

smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);

ret = smu_free_memory_pool(smu);
if (ret)
return ret;
@ -643,6 +760,11 @@ static int smu_sw_init(void *handle)
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);

smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
@ -734,7 +856,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
uint32_t pcie_gen = 0, pcie_width = 0;
int ret;

if (smu_is_dpm_running(smu) && adev->in_suspend) {
if (adev->in_suspend && smu_is_dpm_running(smu)) {
dev_info(adev->dev, "dpm has been enabled\n");
return 0;
}
@ -844,10 +966,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}

ret = smu_i2c_init(smu, &adev->pm.smu_i2c);
if (ret)
return ret;

ret = smu_disable_umc_cdr_12gbps_workaround(smu);
if (ret) {
dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
@ -1046,8 +1164,6 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;

smu_i2c_fini(smu, &adev->pm.smu_i2c);

cancel_work_sync(&smu->throttling_logging_work);

ret = smu_disable_thermal_alert(smu);
@ -1590,6 +1706,9 @@ int smu_set_mp1_state(struct smu_context *smu,
}

ret = smu_send_smc_msg(smu, msg, NULL);
/* some asics may not support those messages */
if (ret == -EINVAL)
ret = 0;
if (ret)
dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

@ -1944,6 +2063,10 @@ int smu_read_sensor(struct smu_context *smu,

mutex_lock(&smu->mutex);

if (smu->ppt_funcs->read_sensor)
if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
goto unlock;

switch (sensor) {
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
@ -1966,7 +2089,7 @@ int smu_read_sensor(struct smu_context *smu,
*size = 4;
break;
case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
*(uint32_t *)data = smu->smu_power.power_gate.vcn_gated ? 0 : 1;
*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0: 1;
*size = 4;
break;
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
@ -1974,11 +2097,12 @@ int smu_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
if (smu->ppt_funcs->read_sensor)
ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
*size = 0;
ret = -EOPNOTSUPP;
break;
}

unlock:
mutex_unlock(&smu->mutex);

return ret;
@ -1849,8 +1849,6 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
|
||||
|
||||
static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
||||
{
|
||||
struct smu_power_context *smu_power = &smu->smu_power;
|
||||
struct smu_power_gate *power_gate = &smu_power->power_gate;
|
||||
int ret = 0;
|
||||
|
||||
if (enable) {
|
||||
@ -1861,7 +1859,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
power_gate->vcn_gated = false;
|
||||
} else {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
|
||||
@ -1870,7 +1867,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
power_gate->vcn_gated = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -2080,22 +2076,11 @@ static const struct i2c_algorithm arcturus_i2c_algo = {
|
||||
.functionality = arcturus_i2c_func,
|
||||
};
|
||||
|
||||
static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
|
||||
return control->dev.parent == &adev->pdev->dev;
|
||||
}
|
||||
|
||||
static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
int res;
|
||||
|
||||
/* smu_i2c_eeprom_init may be called twice in sriov */
|
||||
if (arcturus_i2c_adapter_is_added(control))
|
||||
return 0;
|
||||
|
||||
control->owner = THIS_MODULE;
|
||||
control->class = I2C_CLASS_SPD;
|
||||
control->dev.parent = &adev->pdev->dev;
|
||||
@ -2111,9 +2096,6 @@ static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter
|
||||
|
||||
static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
|
||||
{
|
||||
if (!arcturus_i2c_adapter_is_added(control))
|
||||
return;
|
||||
|
||||
i2c_del_adapter(control);
|
||||
}
|
||||
|
||||
|
@ -292,8 +292,10 @@ struct smu_dpm_context {
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
bool vcn_gated;
bool jpeg_gated;
atomic_t vcn_gated;
atomic_t jpeg_gated;
struct mutex vcn_gate_lock;
struct mutex jpeg_gate_lock;
};

struct smu_power_context {
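Turning vcn_gated/jpeg_gated from plain bools into atomic_t (paired with the new per-block mutexes) lets status-only readers, such as the AMDGPU_PP_SENSOR_VCN_POWER_STATE query in smu_read_sensor() above, sample the flag without taking the gate lock, while gate transitions still serialize on the mutex. In the user-space sketch from earlier, the lock-free read side would look like this (gate_is_powered is an invented helper, not a driver function):

/* Lock-free status query: a single atomic word can be read safely
 * without holding gate_lock; 0 means ungated, i.e. powered on. */
static bool gate_is_powered(struct gate_ctx *g)
{
	return atomic_load(&g->gated) == 0;
}
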
@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
#define SMU11_DRIVER_IF_VERSION 0x33
#define SMU11_DRIVER_IF_VERSION 0x34

#define PPTABLE_Sienna_Cichlid_SMU_VERSION 5

@ -968,9 +968,15 @@ typedef struct {
|
||||
|
||||
typedef struct {
|
||||
uint32_t CurrClock[PPCLK_COUNT];
|
||||
uint16_t AverageGfxclkFrequency;
|
||||
uint16_t AverageFclkFrequency;
|
||||
uint16_t AverageUclkFrequency ;
|
||||
|
||||
uint16_t AverageGfxclkFrequencyPreDs;
|
||||
uint16_t AverageGfxclkFrequencyPostDs;
|
||||
uint16_t AverageFclkFrequencyPreDs;
|
||||
uint16_t AverageFclkFrequencyPostDs;
|
||||
uint16_t AverageUclkFrequencyPreDs ;
|
||||
uint16_t AverageUclkFrequencyPostDs ;
|
||||
|
||||
|
||||
uint16_t AverageGfxActivity ;
|
||||
uint16_t AverageUclkActivity ;
|
||||
uint8_t CurrSocVoltageOffset ;
|
||||
@ -988,6 +994,7 @@ typedef struct {
|
||||
uint16_t TemperatureLiquid0 ;
|
||||
uint16_t TemperatureLiquid1 ;
|
||||
uint16_t TemperaturePlx ;
|
||||
uint16_t Padding16 ;
|
||||
uint32_t ThrottlerStatus ;
|
||||
|
||||
uint8_t LinkDpmLevel;
|
||||
@ -1006,8 +1013,10 @@ typedef struct {
|
||||
uint16_t AverageDclk0Frequency ;
|
||||
uint16_t AverageVclk1Frequency ;
|
||||
uint16_t AverageDclk1Frequency ;
|
||||
uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence
|
||||
uint16_t padding16_2;
|
||||
uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence
|
||||
uint8_t PcieRate ;
|
||||
uint8_t PcieWidth ;
|
||||
|
||||
} SmuMetrics_t;
|
||||
|
||||
typedef struct {
|
||||
|
@ -30,8 +30,8 @@
#define SMU11_DRIVER_IF_VERSION_NV10 0x36
#define SMU11_DRIVER_IF_VERSION_NV12 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x33
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x34
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x3

/* MP Apertures */
#define MP0_Public 0x03800000
@ -785,8 +785,6 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
|
||||
|
||||
static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
||||
{
|
||||
struct smu_power_context *smu_power = &smu->smu_power;
|
||||
struct smu_power_gate *power_gate = &smu_power->power_gate;
|
||||
int ret = 0;
|
||||
|
||||
if (enable) {
|
||||
@ -796,14 +794,12 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->vcn_gated = false;
|
||||
} else {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->vcn_gated = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -811,8 +807,6 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
||||
|
||||
static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
|
||||
{
|
||||
struct smu_power_context *smu_power = &smu->smu_power;
|
||||
struct smu_power_gate *power_gate = &smu_power->power_gate;
|
||||
int ret = 0;
|
||||
|
||||
if (enable) {
|
||||
@ -821,14 +815,12 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->jpeg_gated = false;
|
||||
} else {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->jpeg_gated = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -2457,22 +2449,11 @@ static const struct i2c_algorithm navi10_i2c_algo = {
|
||||
.functionality = navi10_i2c_func,
|
||||
};
|
||||
|
||||
static bool navi10_i2c_adapter_is_added(struct i2c_adapter *control)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
|
||||
return control->dev.parent == &adev->pdev->dev;
|
||||
}
|
||||
|
||||
static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
int res;
|
||||
|
||||
/* smu_i2c_eeprom_init may be called twice in sriov */
|
||||
if (navi10_i2c_adapter_is_added(control))
|
||||
return 0;
|
||||
|
||||
control->owner = THIS_MODULE;
|
||||
control->class = I2C_CLASS_SPD;
|
||||
control->dev.parent = &adev->pdev->dev;
|
||||
@ -2488,9 +2469,6 @@ static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *
|
||||
|
||||
static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
|
||||
{
|
||||
if (!navi10_i2c_adapter_is_added(control))
|
||||
return;
|
||||
|
||||
i2c_del_adapter(control);
|
||||
}
|
||||
|
||||
|
@ -459,8 +459,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context
|
||||
|
||||
static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
||||
{
|
||||
struct smu_power_context *smu_power = &smu->smu_power;
|
||||
struct smu_power_gate *power_gate = &smu_power->power_gate;
|
||||
int ret = 0;
|
||||
|
||||
if (enable) {
|
||||
@ -470,14 +468,12 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->vcn_gated = false;
|
||||
} else {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->vcn_gated = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -485,8 +481,6 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
||||
|
||||
static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
|
||||
{
|
||||
struct smu_power_context *smu_power = &smu->smu_power;
|
||||
struct smu_power_gate *power_gate = &smu_power->power_gate;
|
||||
int ret = 0;
|
||||
|
||||
if (enable) {
|
||||
@ -495,14 +489,12 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->jpeg_gated = false;
|
||||
} else {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->jpeg_gated = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -70,14 +70,16 @@
|
||||
FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
|
||||
FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
|
||||
|
||||
#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
|
||||
|
||||
static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = {
|
||||
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
|
||||
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
|
||||
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
|
||||
MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 1),
|
||||
MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 1),
|
||||
MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1),
|
||||
MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1),
|
||||
MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0),
|
||||
MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
|
||||
MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
|
||||
MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
|
||||
MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
|
||||
MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
|
||||
MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
|
||||
@ -85,42 +87,43 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
|
||||
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
|
||||
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
|
||||
MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
|
||||
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1),
|
||||
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
|
||||
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
|
||||
MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 1),
|
||||
MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 1),
|
||||
MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
|
||||
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
|
||||
MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 1),
|
||||
MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 1),
|
||||
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
|
||||
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
|
||||
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
|
||||
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0),
|
||||
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0),
|
||||
MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
|
||||
MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
|
||||
MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
|
||||
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
|
||||
MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
|
||||
MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
|
||||
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
|
||||
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
|
||||
MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
|
||||
MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 1),
|
||||
MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
|
||||
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
|
||||
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
|
||||
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
|
||||
MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 1),
|
||||
MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh, 1),
|
||||
MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 1),
|
||||
MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 1),
|
||||
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 1),
|
||||
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 1),
|
||||
MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 1),
|
||||
MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 1),
|
||||
MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 0),
|
||||
MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh, 0),
|
||||
MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 0),
|
||||
MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
|
||||
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
|
||||
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
|
||||
MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 0),
|
||||
MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 0),
|
||||
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
|
||||
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
|
||||
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
|
||||
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
|
||||
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
|
||||
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
|
||||
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
|
||||
MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
|
||||
MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 1),
|
||||
MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
|
||||
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
|
||||
MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
|
||||
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
|
||||
MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 1),
|
||||
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 1),
|
||||
MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
|
||||
MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
|
||||
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
|
||||
MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
|
||||
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
|
||||
MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0),
|
||||
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
|
||||
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
|
||||
};
|
||||
|
||||
static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
|
||||
@ -442,13 +445,16 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
|
||||
*value = metrics->CurrClock[PPCLK_DCEFCLK];
|
||||
break;
|
||||
case METRICS_AVERAGE_GFXCLK:
|
||||
*value = metrics->AverageGfxclkFrequency;
|
||||
if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
|
||||
*value = metrics->AverageGfxclkFrequencyPostDs;
|
||||
else
|
||||
*value = metrics->AverageGfxclkFrequencyPreDs;
|
||||
break;
|
||||
case METRICS_AVERAGE_FCLK:
|
||||
*value = metrics->AverageFclkFrequency;
|
||||
*value = metrics->AverageFclkFrequencyPostDs;
|
||||
break;
|
||||
case METRICS_AVERAGE_UCLK:
|
||||
*value = metrics->AverageUclkFrequency;
|
||||
*value = metrics->AverageUclkFrequencyPostDs;
|
||||
break;
|
||||
case METRICS_AVERAGE_GFXACTIVITY:
|
||||
*value = metrics->AverageGfxActivity;
|
||||
@ -760,10 +766,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
|
||||
|
||||
static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
||||
{
|
||||
struct smu_power_context *smu_power = &smu->smu_power;
|
||||
struct smu_power_gate *power_gate = &smu_power->power_gate;
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
||||
int ret = 0;
|
||||
|
||||
if (enable) {
|
||||
@ -779,7 +782,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
power_gate->vcn_gated = false;
|
||||
} else {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
|
||||
@ -792,7 +794,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
power_gate->vcn_gated = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -800,8 +801,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
|
||||
|
||||
static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
|
||||
{
|
||||
struct smu_power_context *smu_power = &smu->smu_power;
|
||||
struct smu_power_gate *power_gate = &smu_power->power_gate;
|
||||
int ret = 0;
|
||||
|
||||
if (enable) {
|
||||
@ -810,14 +809,12 @@ static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enab
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->jpeg_gated = false;
|
||||
} else {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->jpeg_gated = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -2624,22 +2621,11 @@ static const struct i2c_algorithm sienna_cichlid_i2c_algo = {
|
||||
.functionality = sienna_cichlid_i2c_func,
|
||||
};
|
||||
|
||||
static bool sienna_cichlid_i2c_adapter_is_added(struct i2c_adapter *control)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
|
||||
return control->dev.parent == &adev->pdev->dev;
|
||||
}
|
||||
|
||||
static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
int res;
|
||||
|
||||
/* smu_i2c_eeprom_init may be called twice in sriov */
|
||||
if (sienna_cichlid_i2c_adapter_is_added(control))
|
||||
return 0;
|
||||
|
||||
control->owner = THIS_MODULE;
|
||||
control->class = I2C_CLASS_SPD;
|
||||
control->dev.parent = &adev->pdev->dev;
|
||||
@ -2655,9 +2641,6 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_a
|
||||
|
||||
static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
|
||||
{
|
||||
if (!sienna_cichlid_i2c_adapter_is_added(control))
|
||||
return;
|
||||
|
||||
i2c_del_adapter(control);
|
||||
}
|
||||
|
||||
|
@ -166,7 +166,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,

switch (type) {
case CMN2ASIC_MAPPING_MSG:
if (index > SMU_MSG_MAX_COUNT ||
if (index >= SMU_MSG_MAX_COUNT ||
!smu->message_map)
return -EINVAL;

@ -181,7 +181,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return msg_mapping.map_to;

case CMN2ASIC_MAPPING_CLK:
if (index > SMU_CLK_COUNT ||
if (index >= SMU_CLK_COUNT ||
!smu->clock_map)
return -EINVAL;

@ -192,7 +192,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return mapping.map_to;

case CMN2ASIC_MAPPING_FEATURE:
if (index > SMU_FEATURE_COUNT ||
if (index >= SMU_FEATURE_COUNT ||
!smu->feature_map)
return -EINVAL;

@ -203,7 +203,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return mapping.map_to;

case CMN2ASIC_MAPPING_TABLE:
if (index > SMU_TABLE_COUNT ||
if (index >= SMU_TABLE_COUNT ||
!smu->table_map)
return -EINVAL;

@ -214,7 +214,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
return mapping.map_to;

case CMN2ASIC_MAPPING_PWR:
if (index > SMU_POWER_SOURCE_COUNT ||
if (index >= SMU_POWER_SOURCE_COUNT ||
!smu->pwr_src_map)
return -EINVAL;

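The smu_cmn_to_asic_specific_index() changes above are plain off-by-one fixes: each *_COUNT enum is the number of table entries, so valid indices run from 0 to COUNT - 1 and the reject test has to be index >= COUNT; with index > COUNT, an index equal to COUNT slips through and reads one element past the end of the map. A tiny self-contained illustration (the array and its values are made up, only the bounds check mirrors the fix):

#include <stdio.h>

#define SMU_MSG_MAX_COUNT 4      /* stand-in for the real enum count */

static const int message_map[SMU_MSG_MAX_COUNT] = { 10, 11, 12, 13 };

static int to_asic_index(int index)
{
	if (index >= SMU_MSG_MAX_COUNT)   /* '>' would let index == 4 through */
		return -1;                /* and index past the end of the map */
	return message_map[index];
}

int main(void)
{
	/* Prints 13 for the last valid slot, -1 for the first out-of-range index. */
	printf("%d %d\n", to_asic_index(3), to_asic_index(SMU_MSG_MAX_COUNT));
	return 0;
}
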
@ -60,7 +60,6 @@
#define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu)
#define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu)
#define smu_set_default_dpm_table(smu) smu_ppt_funcs(set_default_dpm_table, 0, smu)
#define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu)
#define smu_set_default_od8_settings(smu) smu_ppt_funcs(set_default_od8_settings, 0, smu)
#define smu_enable_thermal_alert(smu) smu_ppt_funcs(enable_thermal_alert, 0, smu)
@ -77,8 +76,6 @@
#define smu_get_dal_power_level(smu, clocks) smu_ppt_funcs(get_dal_power_level, 0, smu, clocks)
#define smu_get_perf_level(smu, designation, level) smu_ppt_funcs(get_perf_level, 0, smu, designation, level)
#define smu_get_current_shallow_sleep_clocks(smu, clocks) smu_ppt_funcs(get_current_shallow_sleep_clocks, 0, smu, clocks)
#define smu_dpm_set_vcn_enable(smu, enable) smu_ppt_funcs(dpm_set_vcn_enable, 0, smu, enable)
#define smu_dpm_set_jpeg_enable(smu, enable) smu_ppt_funcs(dpm_set_jpeg_enable, 0, smu, enable)
#define smu_set_watermarks_table(smu, clock_ranges) smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges)
#define smu_thermal_temperature_range_update(smu, range, rw) smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw)
#define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu)
@ -1029,6 +1029,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@ -2725,7 +2725,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)

static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
return ci_is_smc_ram_running(hwmgr);
return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS,
VOLTAGE_CONTROLLER_ON))
? true : false;
}

static int ci_smu_init(struct pp_hwmgr *hwmgr)