Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-21 09:42:17 +07:00)
drm/amd/powerplay: properly set the dpm_enabled state
On the ASIC powered down (in baco or system suspend), the dpm_enabled
will be set as false. Then all access (e.g. df state setting issued on
RAS error event) to SMU will be blocked.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent 94e0805ba9
commit 774e335b87
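The commit message describes a simple gating scheme: once adev->pm.dpm_enabled is cleared (baco entry or system suspend), the optional SMU services bail out with -EINVAL, while paths that must stay usable on a powered-down ASIC (mp1 state setting, baco/mode1 reset) skip the check. Below is a minimal standalone C sketch of that pattern; the mock_* types and function names are illustrative stand-ins, not the driver's real API.

/*
 * Minimal sketch of the dpm_enabled gating this commit applies
 * (mock types; compiles as a plain userspace program).
 */
#include <stdbool.h>
#include <stdio.h>

#define EINVAL 22

struct mock_adev { struct { bool dpm_enabled; } pm; };
struct mock_smu { struct mock_adev *adev; };

/* Gated service, mirroring smu_set_df_cstate() after this patch. */
static int mock_set_df_cstate(struct mock_smu *smu)
{
        if (!smu->adev->pm.dpm_enabled)
                return -EINVAL;  /* blocked while in baco/suspend */
        return 0;                /* would message the SMU here */
}

/* Exempt service, mirroring smu_set_mp1_state(): no dpm_enabled check. */
static int mock_set_mp1_state(struct mock_smu *smu)
{
        (void)smu;
        return 0;                /* still granted with DPM disabled */
}

int main(void)
{
        struct mock_adev adev = { .pm = { .dpm_enabled = true } };
        struct mock_smu smu = { .adev = &adev };

        printf("df cstate, ASIC up:   %d\n", mock_set_df_cstate(&smu));

        adev.pm.dpm_enabled = false;  /* baco entry or system suspend */
        printf("df cstate, ASIC down: %d\n", mock_set_df_cstate(&smu));
        printf("mp1 state, ASIC down: %d\n", mock_set_mp1_state(&smu));
        return 0;
}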
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -62,6 +62,7 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask
 
 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
 {
+        struct amdgpu_device *adev = smu->adev;
         size_t size = 0;
         int ret = 0, i = 0;
         uint32_t feature_mask[2] = { 0 };
@@ -70,6 +71,9 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
         uint32_t sort_feature[SMU_FEATURE_COUNT];
         uint64_t hw_feature_count = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
@@ -110,9 +114,6 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
         uint32_t feature_low = 0, feature_high = 0;
         int ret = 0;
 
-        if (!smu->pm_enabled)
-                return ret;
-
         feature_low = (feature_mask >> 0 ) & 0xffffffff;
         feature_high = (feature_mask >> 32) & 0xffffffff;
 
@@ -155,6 +156,10 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
         uint64_t feature_2_enabled = 0;
         uint64_t feature_2_disabled = 0;
         uint64_t feature_enables = 0;
+        struct amdgpu_device *adev = smu->adev;
+
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
 
         mutex_lock(&smu->mutex);
 
@@ -417,8 +422,12 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                            bool gate)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         switch (block_type) {
         case AMD_IP_BLOCK_TYPE_UVD:
                 ret = smu_dpm_set_uvd_enable(smu, !gate);
@@ -569,8 +578,12 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 {
         struct smu_table_context *smu_table = &smu->smu_table;
+        struct amdgpu_device *adev = smu->adev;
         uint32_t powerplay_table_size;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                 return -EINVAL;
 
@@ -591,11 +604,13 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
 {
         struct smu_table_context *smu_table = &smu->smu_table;
+        struct amdgpu_device *adev = smu->adev;
         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
         int ret = 0;
 
-        if (!smu->pm_enabled)
+        if (!adev->pm.dpm_enabled)
                 return -EINVAL;
+
         if (header->usStructureSize != size) {
                 pr_err("pp table size not matched !\n");
                 return -EIO;
@@ -636,8 +651,6 @@ int smu_feature_init_dpm(struct smu_context *smu)
         int ret = 0;
         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
 
-        if (!smu->pm_enabled)
-                return ret;
         mutex_lock(&feature->mutex);
         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
         mutex_unlock(&feature->mutex);
@@ -1360,10 +1373,7 @@ static int smu_hw_init(void *handle)
         if (ret)
                 goto failed;
 
-        if (!smu->pm_enabled)
-                adev->pm.dpm_enabled = false;
-        else
-                adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
+        adev->pm.dpm_enabled = true;
 
         pr_info("SMU is initialized successfully!\n");
 
@@ -1397,6 +1407,8 @@ static int smu_hw_fini(void *handle)
         if (!smu->pm_enabled)
                 return 0;
 
+        adev->pm.dpm_enabled = false;
+
         smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
 
         if (!amdgpu_sriov_vf(adev)){
@@ -1538,6 +1550,8 @@ static int smu_suspend(void *handle)
         if (!smu->pm_enabled)
                 return 0;
 
+        adev->pm.dpm_enabled = false;
+
         smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
 
         if(!amdgpu_sriov_vf(adev)) {
@@ -1594,6 +1608,8 @@ static int smu_resume(void *handle)
 
         smu->disable_uclk_switch = 0;
 
+        adev->pm.dpm_enabled = true;
+
         pr_info("SMU is resumed successfully!\n");
 
         return 0;
@@ -1605,10 +1621,14 @@ static int smu_resume(void *handle)
 int smu_display_configuration_change(struct smu_context *smu,
                                      const struct amd_pp_display_configuration *display_config)
 {
+        struct amdgpu_device *adev = smu->adev;
         int index = 0;
         int num_of_active_display = 0;
 
-        if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
+        if (!adev->pm.dpm_enabled)
                 return -EINVAL;
 
+        if (!is_support_sw_smu(smu->adev))
+                return -EINVAL;
+
         if (!display_config)
@@ -1670,12 +1690,16 @@ int smu_get_current_clocks(struct smu_context *smu,
                            struct amd_pp_clock_info *clocks)
 {
         struct amd_pp_simple_clock_info simple_clocks = {0};
+        struct amdgpu_device *adev = smu->adev;
         struct smu_clock_info hw_clocks;
         int ret = 0;
 
         if (!is_support_sw_smu(smu->adev))
                 return -EINVAL;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         smu_get_dal_power_level(smu, &simple_clocks);
@@ -1738,7 +1762,7 @@ static int smu_enable_umd_pstate(void *handle,
         struct smu_context *smu = (struct smu_context*)(handle);
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-        if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
+        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                 return -EINVAL;
 
         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1780,9 +1804,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
         long workload;
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-        if (!smu->pm_enabled)
-                return -EINVAL;
-
         if (!skip_display_settings) {
                 ret = smu_display_config_changed(smu);
                 if (ret) {
@@ -1833,8 +1854,12 @@ int smu_handle_task(struct smu_context *smu,
                     enum amd_pp_task task_id,
                     bool lock_needed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (lock_needed)
                 mutex_lock(&smu->mutex);
 
@@ -1868,10 +1893,11 @@ int smu_switch_power_profile(struct smu_context *smu,
                              bool en)
 {
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+        struct amdgpu_device *adev = smu->adev;
         long workload;
         uint32_t index;
 
-        if (!smu->pm_enabled)
+        if (!adev->pm.dpm_enabled)
                 return -EINVAL;
 
         if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
@@ -1902,8 +1928,12 @@ int smu_switch_power_profile(struct smu_context *smu,
 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 {
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+        struct amdgpu_device *adev = smu->adev;
         enum amd_dpm_forced_level level;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                 return -EINVAL;
 
@@ -1917,8 +1947,12 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
 {
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                 return -EINVAL;
 
@@ -1941,8 +1975,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
 
 int smu_set_display_count(struct smu_context *smu, uint32_t count)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
         ret = smu_init_display_count(smu, count);
         mutex_unlock(&smu->mutex);
@@ -1956,8 +1994,12 @@ int smu_force_clk_levels(struct smu_context *smu,
                          bool lock_needed)
 {
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                 pr_debug("force clock level is for dpm manual mode only.\n");
                 return -EINVAL;
@@ -1975,20 +2017,19 @@ int smu_force_clk_levels(struct smu_context *smu,
         return ret;
 }
 
+/*
+ * On system suspending or resetting, the dpm_enabled
+ * flag will be cleared. So that those SMU services which
+ * are not supported will be gated.
+ * However, the mp1 state setting should still be granted
+ * even if the dpm_enabled cleared.
+ */
 int smu_set_mp1_state(struct smu_context *smu,
                       enum pp_mp1_state mp1_state)
 {
         uint16_t msg;
         int ret;
 
-        /*
-         * The SMC is not fully ready. That may be
-         * expected as the IP may be masked.
-         * So, just return without error.
-         */
-        if (!smu->pm_enabled)
-                return 0;
-
         mutex_lock(&smu->mutex);
 
         switch (mp1_state) {
@@ -2025,15 +2066,11 @@ int smu_set_mp1_state(struct smu_context *smu,
 int smu_set_df_cstate(struct smu_context *smu,
                       enum pp_df_cstate state)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
-        /*
-         * The SMC is not fully ready. That may be
-         * expected as the IP may be masked.
-         * So, just return without error.
-         */
-        if (!smu->pm_enabled)
-                return 0;
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
 
         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
                 return 0;
@@ -2067,6 +2104,10 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
         void *table = smu->smu_table.watermarks_table;
+        struct amdgpu_device *adev = smu->adev;
+
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
 
         if (!table)
                 return -EINVAL;
@@ -2091,8 +2132,12 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
 
 int smu_set_ac_dc(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         /* controlled by firmware */
         if (smu->dc_controlled_by_gpio)
                 return 0;
@@ -2151,8 +2196,12 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
 
 int smu_load_microcode(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->load_microcode)
@@ -2165,8 +2214,12 @@ int smu_load_microcode(struct smu_context *smu)
 
 int smu_check_fw_status(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->check_fw_status)
@@ -2193,8 +2246,12 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 
 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_fan_speed_rpm)
@@ -2210,10 +2267,15 @@ int smu_get_power_limit(struct smu_context *smu,
                         bool def,
                         bool lock_needed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
-        if (lock_needed)
+        if (lock_needed) {
+                if (!adev->pm.dpm_enabled)
+                        return -EINVAL;
+
                 mutex_lock(&smu->mutex);
+        }
 
         if (smu->ppt_funcs->get_power_limit)
                 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
@@ -2226,8 +2288,12 @@ int smu_get_power_limit(struct smu_context *smu,
 
 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_power_limit)
@@ -2240,8 +2306,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 
 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->print_clk_levels)
@@ -2254,8 +2324,12 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
 
 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_od_percentage)
@@ -2268,8 +2342,12 @@ int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
 
 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_od_percentage)
@@ -2284,8 +2362,12 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
                           enum PP_OD_DPM_TABLE_COMMAND type,
                           long *input, uint32_t size)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->od_edit_dpm_table)
@@ -2300,8 +2382,12 @@ int smu_read_sensor(struct smu_context *smu,
                     enum amd_pp_sensors sensor,
                     void *data, uint32_t *size)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->read_sensor)
@@ -2314,8 +2400,12 @@ int smu_read_sensor(struct smu_context *smu,
 
 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_power_profile_mode)
@@ -2331,8 +2421,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
                                uint32_t param_size,
                                bool lock_needed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (lock_needed)
                 mutex_lock(&smu->mutex);
 
@@ -2348,8 +2442,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
 
 int smu_get_fan_control_mode(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_fan_control_mode)
@@ -2362,8 +2460,12 @@ int smu_get_fan_control_mode(struct smu_context *smu)
 
 int smu_set_fan_control_mode(struct smu_context *smu, int value)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_fan_control_mode)
@@ -2376,8 +2478,12 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
 
 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_fan_speed_percent)
@@ -2390,8 +2496,12 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 
 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_fan_speed_percent)
@@ -2404,8 +2514,12 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 
 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_fan_speed_rpm)
@@ -2418,8 +2532,12 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 
 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
@@ -2432,8 +2550,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 
 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (smu->ppt_funcs->set_active_display_count)
                 ret = smu->ppt_funcs->set_active_display_count(smu, count);
 
@@ -2444,8 +2566,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
                           enum amd_pp_clock_type type,
                           struct amd_pp_clocks *clocks)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_clock_by_type)
@@ -2459,8 +2585,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
 int smu_get_max_high_clocks(struct smu_context *smu,
                             struct amd_pp_simple_clock_info *clocks)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_max_high_clocks)
@@ -2475,8 +2605,12 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
                                        enum smu_clk_type clk_type,
                                        struct pp_clock_levels_with_latency *clocks)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_clock_by_type_with_latency)
@@ -2491,8 +2625,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
                                        enum amd_pp_clock_type type,
                                        struct pp_clock_levels_with_voltage *clocks)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
@@ -2507,8 +2645,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
 int smu_display_clock_voltage_request(struct smu_context *smu,
                                       struct pp_display_clock_request *clock_req)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->display_clock_voltage_request)
@@ -2522,8 +2664,12 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
 
 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = -EINVAL;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->display_disable_memory_clock_switch)
@@ -2536,8 +2682,12 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
 
 int smu_notify_smu_enable_pwe(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->notify_smu_enable_pwe)
@@ -2551,8 +2701,12 @@ int smu_notify_smu_enable_pwe(struct smu_context *smu)
 int smu_set_xgmi_pstate(struct smu_context *smu,
                         uint32_t pstate)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_xgmi_pstate)
@@ -2565,8 +2719,12 @@ int smu_set_xgmi_pstate(struct smu_context *smu,
 
 int smu_set_azalia_d3_pme(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_azalia_d3_pme)
@@ -2577,6 +2735,14 @@ int smu_set_azalia_d3_pme(struct smu_context *smu)
         return ret;
 }
 
+/*
+ * On system suspending or resetting, the dpm_enabled
+ * flag will be cleared. So that those SMU services which
+ * are not supported will be gated.
+ *
+ * However, the baco/mode1 reset should still be granted
+ * as they are still supported and necessary.
+ */
 bool smu_baco_is_support(struct smu_context *smu)
 {
         bool ret = false;
@@ -2648,8 +2814,12 @@ int smu_mode2_reset(struct smu_context *smu)
 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
                                          struct pp_smu_nv_clock_table *max_clocks)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
@@ -2664,8 +2834,12 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
                             unsigned int *clock_values_in_khz,
                             unsigned int *num_states)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_uclk_dpm_states)
@@ -2679,6 +2853,10 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 {
         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+        struct amdgpu_device *adev = smu->adev;
+
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
 
         mutex_lock(&smu->mutex);
 
@@ -2693,8 +2871,12 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 int smu_get_dpm_clock_table(struct smu_context *smu,
                             struct dpm_clocks *clock_table)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_dpm_clock_table)
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -2226,12 +2226,8 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
 static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
 {
         struct amdgpu_device *adev = to_amdgpu_device(control);
-        struct smu_context *smu = &adev->smu;
         int res;
 
-        if (!smu->pm_enabled)
-                return -EOPNOTSUPP;
-
         control->owner = THIS_MODULE;
         control->class = I2C_CLASS_SPD;
         control->dev.parent = &adev->pdev->dev;
@@ -2247,12 +2243,6 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
 
 static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
 {
-        struct amdgpu_device *adev = to_amdgpu_device(control);
-        struct smu_context *smu = &adev->smu;
-
-        if (!smu->pm_enabled)
-                return;
-
         i2c_del_adapter(control);
 }
 
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -847,7 +847,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
         uint32_t i, size = 0;
         int16_t workload_type = 0;
 
-        if (!smu->pm_enabled || !buf)
+        if (!buf)
                 return -EINVAL;
 
         for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -481,8 +481,6 @@ int smu_v11_0_init_power(struct smu_context *smu)
 {
         struct smu_power_context *smu_power = &smu->smu_power;
 
-        if (!smu->pm_enabled)
-                return 0;
         if (smu_power->power_context || smu_power->power_context_size != 0)
                 return -EINVAL;
 
@@ -499,8 +497,6 @@ int smu_v11_0_fini_power(struct smu_context *smu)
 {
         struct smu_power_context *smu_power = &smu->smu_power;
 
-        if (!smu->pm_enabled)
-                return 0;
         if (!smu_power->power_context || smu_power->power_context_size == 0)
                 return -EINVAL;
 
@@ -785,8 +781,6 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
 {
         struct smu_table_context *table_context = &smu->smu_table;
 
-        if (!smu->pm_enabled)
-                return 0;
         if (!table_context)
                 return -EINVAL;
 
@@ -837,9 +831,6 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
 {
         int ret = 0;
 
-        if (!smu->pm_enabled)
-                return ret;
-
         ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
         return ret;
 }
@@ -934,8 +925,6 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
 {
         int ret = 0;
 
-        if (!smu->pm_enabled)
-                return ret;
         if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
             smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
                 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@ -950,9 +939,6 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
         int ret = 0;
         int clk_id;
 
-        if (!smu->pm_enabled)
-                return ret;
-
         if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
             (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
                 return 0;
@@ -1207,9 +1193,6 @@ int smu_v11_0_start_thermal_control(struct smu_context *smu)
         struct smu_temperature_range range;
         struct amdgpu_device *adev = smu->adev;
 
-        if (!smu->pm_enabled)
-                return ret;
-
         memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
 
         ret = smu_get_thermal_temperature_range(smu, &range);
@@ -1323,9 +1306,6 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
         enum smu_clk_type clk_select = 0;
         uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
 
-        if (!smu->pm_enabled)
-                return -EINVAL;
-
         if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
             smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                 switch (clk_type) {
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -1796,7 +1796,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
                         "PD_Data_error_rate_coeff"};
         int result = 0;
 
-        if (!smu->pm_enabled || !buf)
+        if (!buf)
                 return -EINVAL;
 
         size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
@@ -1887,8 +1887,6 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
 
         smu->power_profile_mode = input[size];
 
-        if (!smu->pm_enabled)
-                return ret;
         if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
                 pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
                 return -EINVAL;