Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)

Merge tag 'amd-drm-next-5.8-2020-05-27' of git://people.freedesktop.org/~agd5f/linux into drm-next

amd-drm-next-5.8-2020-05-27:

amdgpu:
- SRIOV fixes
- RAS fixes
- VCN 2.5 DPG (Dynamic PowerGating) fixes
- FP16 updates for display
- CTF cleanups
- Display fixes
- Fix pcie bw sysfs handling
- Enable resizeable BAR support for gmc 10.x
- GFXOFF fixes for Raven
- PM sysfs handling fixes

amdkfd:
- Fix a race condition
- Warning fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200527231219.3930-1-alexander.deucher@amd.com

This commit is contained in commit 9ca1f474ce.
@@ -173,6 +173,7 @@ extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
 extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_dc_debug_mask;
 extern uint amdgpu_dm_abm_level;
 extern struct amdgpu_mgpu_info mgpu_info;
 extern int amdgpu_ras_enable;
@@ -738,6 +739,7 @@ struct amdgpu_device {
	uint32_t			rev_id;
	uint32_t			external_rev_id;
	unsigned long			flags;
+	unsigned long			apu_flags;
	int				usec_timeout;
	const struct amdgpu_asic_funcs	*asic_funcs;
	bool				shutdown;
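Most of this series replaces ad-hoc Raven identity checks (adev->rev_id >= 8, PCI device ID 0x15d8) with tests on the apu_flags field added above. The bits come from enum amd_apu_flags in drivers/gpu/drm/amd/include/amd_shared.h; as a sketch (values quoted from the 5.8-era header and worth double-checking against the tree):

	/* Sketch of the APU identity bitmask; the authoritative
	 * definition lives in amd_shared.h. */
	enum amd_apu_flags {
		AMD_APU_IS_RAVEN = 0x00000001UL,
		AMD_APU_IS_RAVEN2 = 0x00000002UL,
		AMD_APU_IS_PICASSO = 0x00000004UL,
		AMD_APU_IS_RENOIR = 0x00000008UL,
	};

The flags are populated once in soc15_common_early_init() (see the soc15.c hunks below), so a check like adev->rev_id >= 8 becomes adev->apu_flags & AMD_APU_IS_RAVEN2 everywhere else.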
@@ -1302,15 +1302,15 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		return -EBUSY;
	}

+	/* No more MMU notifiers */
+	amdgpu_mn_unregister(mem->bo);
+
	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

-	/* No more MMU notifiers */
-	amdgpu_mn_unregister(mem->bo);
-
	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;
@@ -1567,9 +1567,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
-		if (adev->rev_id >= 8)
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
-		else if (adev->pdev->device == 0x15d8)
+		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
@@ -1617,8 +1617,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
		(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));

-	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
		amdgpu_discovery_get_gfx_info(adev);
+		goto parse_soc_bounding_box;
+	}

	adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
	adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
@@ -1764,13 +1766,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
		return -EINVAL;
	}

-	r = amdgpu_device_parse_gpu_info_fw(adev);
-	if (r)
-		return r;
-
-	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
-		amdgpu_discovery_get_gfx_info(adev);
-
	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
@@ -1825,6 +1820,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
		}
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+			r = amdgpu_device_parse_gpu_info_fw(adev);
+			if (r)
+				return r;
+
			/* skip vbios handling for new handshake */
			if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver == 1)
				continue;
@@ -523,7 +523,8 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
		break;
	case CHIP_RAVEN:
		/* enable S/G on PCO and RV2 */
-		if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+		if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+		    (adev->apu_flags & AMD_APU_IS_PICASSO))
			domain |= AMDGPU_GEM_DOMAIN_GTT;
		break;
	default:
@@ -450,6 +450,7 @@ struct amdgpu_pm {

	/* Used for I2C access to various EEPROMs on relevant ASICs */
	struct i2c_adapter smu_i2c;
+	struct list_head pm_attr_list;
 };

 #define R600_SSTU_DFLT 0
@@ -140,6 +140,7 @@ int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
 /* FBC (bit 0) disabled by default*/
 uint amdgpu_dc_feature_mask = 0;
+uint amdgpu_dc_debug_mask = 0;
 int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = 0;
 int amdgpu_discovery = -1;
@@ -714,6 +715,13 @@ MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1
 MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
 module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);

+/**
+ * DOC: dcdebugmask (uint)
+ * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ */
+MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
+module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
+
 /**
  * DOC: abmlevel (uint)
  * Override the default ABM (Adaptive Backlight Management) level used for DC
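The new dcdebugmask parameter is consumed bit-by-bit in amdgpu_dm_init() (see the amdgpu_dm.c hunk further down). The bit layout follows enum DC_DEBUG_MASK in amd_shared.h; as a sketch (values as they stood in this series, treat as indicative):

	/* Sketch of the DC debug mask bits tested in amdgpu_dm_init(). */
	enum DC_DEBUG_MASK {
		DC_DISABLE_PIPE_SPLIT = 0x1,
		DC_DISABLE_STUTTER = 0x2,
		DC_DISABLE_DSC = 0x4,
		DC_DISABLE_CLOCK_GATING = 0x8
	};

For example, booting with amdgpu.dcdebugmask=0x4 on the kernel command line would disable DSC while debugging display issues.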
@@ -163,9 +163,6 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
	enum amd_pm_state_type pm;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -199,9 +196,6 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
	enum amd_pm_state_type state;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -303,9 +297,6 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
	enum amd_dpm_forced_level level = 0xff;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -343,9 +334,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -383,6 +371,15 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
		return count;
	}

+	if (adev->asic_type == CHIP_RAVEN) {
+		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
+			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
+				amdgpu_gfx_off_ctrl(adev, false);
+			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
+				amdgpu_gfx_off_ctrl(adev, true);
+		}
+	}
+
	/* profile_exit setting is valid only when current mode is in profile mode */
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
		    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
@@ -475,9 +472,6 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -514,9 +508,6 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
@@ -534,9 +525,6 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
	unsigned long idx;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
@@ -592,9 +580,6 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
	char *table = NULL;
	int size, ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -634,9 +619,6 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -739,9 +721,6 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
-
	if (count > 127)
		return -EINVAL;

@@ -831,9 +810,6 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
	ssize_t size;
	int ret;

-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -883,9 +859,6 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
	uint64_t featuremask;
	int ret;

-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
-
	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;
@@ -926,9 +899,6 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
	ssize_t size;
	int ret;

-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -985,9 +955,6 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
	ssize_t size;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1051,9 +1018,6 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
	int ret;
	uint32_t mask = 0;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1085,9 +1049,6 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
	ssize_t size;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1115,9 +1076,6 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
	uint32_t mask = 0;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1149,9 +1107,6 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
	ssize_t size;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1179,9 +1134,6 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
	int ret;
	uint32_t mask = 0;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1215,9 +1167,6 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
	ssize_t size;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1245,9 +1194,6 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
	int ret;
	uint32_t mask = 0;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1281,9 +1227,6 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
	ssize_t size;
	int ret;

-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1311,9 +1254,6 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
	int ret;
	uint32_t mask = 0;

-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
-
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1347,9 +1287,6 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
	ssize_t size;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1377,9 +1314,6 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
	int ret;
	uint32_t mask = 0;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;
@@ -1413,9 +1347,6 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
	uint32_t value = 0;
	int ret;

-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1441,9 +1372,6 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
	int ret;
	long int value;

-	if (amdgpu_sriov_vf(adev))
-		return -EINVAL;
-
	ret = kstrtol(buf, 0, &value);

	if (ret)
@@ -1482,9 +1410,6 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
	uint32_t value = 0;
	int ret;

-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1510,9 +1435,6 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
	int ret;
	long int value;

-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
	ret = kstrtol(buf, 0, &value);

	if (ret)
@@ -1571,9 +1493,6 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
	ssize_t size;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;
@@ -1615,9 +1534,6 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
	if (ret)
		return -EINVAL;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return -EINVAL;
-
	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
@@ -1671,9 +1587,6 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0)
		return r;
@@ -1707,9 +1620,6 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0)
		return r;
@@ -1745,11 +1655,14 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 {
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
-	uint64_t count0, count1;
+	uint64_t count0 = 0, count1 = 0;
	int ret;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
+	if (adev->flags & AMD_IS_APU)
+		return -ENODATA;
+
+	if (!adev->asic_funcs->get_pcie_usage)
+		return -ENODATA;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
@@ -1781,9 +1694,6 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
	if (adev->unique_id)
		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

@@ -1815,7 +1725,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 };

 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
-			       uint32_t mask)
+			       uint32_t mask, enum amdgpu_device_attr_states *states)
 {
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *attr_name = dev_attr->attr.name;
@@ -1823,42 +1733,42 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
	enum amd_asic_type asic_type = adev->asic_type;

	if (!(attr->flags & mask)) {
-		attr->states = ATTR_STATE_UNSUPPORTED;
+		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

 #define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
-		if (asic_type <= CHIP_VEGA10)
-			attr->states = ATTR_STATE_UNSUPPORTED;
+		if (asic_type < CHIP_VEGA10)
+			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
-		if (asic_type <= CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
-			attr->states = ATTR_STATE_UNSUPPORTED;
+		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (asic_type < CHIP_VEGA20)
-			attr->states = ATTR_STATE_UNSUPPORTED;
+			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
		if (asic_type == CHIP_ARCTURUS)
-			attr->states = ATTR_STATE_UNSUPPORTED;
+			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
-		attr->states = ATTR_STATE_UNSUPPORTED;
+		*states = ATTR_STATE_UNSUPPORTED;
		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
-			attr->states = ATTR_STATE_UNSUPPORTED;
+			*states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
-			attr->states = ATTR_STATE_UNSUPPORTED;
+			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU)
-			attr->states = ATTR_STATE_UNSUPPORTED;
+			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		if (!adev->unique_id)
-			attr->states = ATTR_STATE_UNSUPPORTED;
+			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
-		if (adev->flags & AMD_IS_APU || asic_type <= CHIP_VEGA10)
-			attr->states = ATTR_STATE_UNSUPPORTED;
+		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
+			*states = ATTR_STATE_UNSUPPORTED;
	}

	if (asic_type == CHIP_ARCTURUS) {
@@ -1879,27 +1789,29 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_

 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
-				     uint32_t mask)
+				     uint32_t mask, struct list_head *attr_list)
 {
	int ret = 0;
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
+	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
+	struct amdgpu_device_attr_entry *attr_entry;
+
	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
-			   uint32_t mask) = default_attr_update;
+			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;

	BUG_ON(!attr);

	attr_update = attr->attr_update ? attr_update : default_attr_update;

-	ret = attr_update(adev, attr, mask);
+	ret = attr_update(adev, attr, mask, &attr_states);
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

	/* the attr->states maybe changed after call attr->attr_update function */
-	if (attr->states == ATTR_STATE_UNSUPPORTED)
+	if (attr_states == ATTR_STATE_UNSUPPORTED)
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
@@ -1908,7 +1820,14 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev,
			name, ret);
	}

-	attr->states = ATTR_STATE_SUPPORTED;
+	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
+	if (!attr_entry)
+		return -ENOMEM;
+
+	attr_entry->attr = attr;
+	INIT_LIST_HEAD(&attr_entry->entry);
+
+	list_add_tail(&attr_entry->entry, attr_list);

	return ret;
 }
@@ -1917,24 +1836,23 @@ static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_
 {
	struct device_attribute *dev_attr = &attr->dev_attr;

-	if (attr->states == ATTR_STATE_UNSUPPORTED)
-		return;
-
	device_remove_file(adev->dev, dev_attr);
-
-	attr->states = ATTR_STATE_UNSUPPORTED;
 }

+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+					     struct list_head *attr_list);
+
 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
-					    uint32_t mask)
+					    uint32_t mask,
+					    struct list_head *attr_list)
 {
	int ret = 0;
	uint32_t i = 0;

	for (i = 0; i < counts; i++) {
-		ret = amdgpu_device_attr_create(adev, &attrs[i], mask);
+		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
		if (ret)
			goto failed;
	}
@@ -1942,21 +1860,24 @@ static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
	return 0;

 failed:
-	for (; i > 0; i--) {
-		amdgpu_device_attr_remove(adev, &attrs[i]);
-	}
+	amdgpu_device_attr_remove_groups(adev, attr_list);

	return ret;
 }

 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
-					     struct amdgpu_device_attr *attrs,
-					     uint32_t counts)
+					     struct list_head *attr_list)
 {
-	uint32_t i = 0;
+	struct amdgpu_device_attr_entry *entry, *entry_tmp;

-	for (i = 0; i < counts; i++)
-		amdgpu_device_attr_remove(adev, &attrs[i]);
+	if (list_empty(attr_list))
+		return ;
+
+	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
+		amdgpu_device_attr_remove(adev, entry->attr);
+		list_del(&entry->entry);
+		kfree(entry);
+	}
 }

 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
@@ -3367,6 +3288,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
	if (adev->pm.dpm_enabled == 0)
		return 0;

+	INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
@@ -3393,7 +3316,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
	ret = amdgpu_device_attr_create_groups(adev,
					       amdgpu_device_attrs,
					       ARRAY_SIZE(amdgpu_device_attrs),
-					       mask);
+					       mask,
+					       &adev->pm.pm_attr_list);
	if (ret)
		return ret;

@@ -3410,9 +3334,7 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

-	amdgpu_device_attr_remove_groups(adev,
-					 amdgpu_device_attrs,
-					 ARRAY_SIZE(amdgpu_device_attrs));
+	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
 }

 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
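The sysfs refactor above drops the shared per-attribute states field in favor of a per-device list of the files that were actually created, so teardown removes exactly what init made. Reduced to its essentials, the pattern looks like this (a simplified sketch with a hypothetical helper, not the driver code itself):

	struct attr_entry {
		struct list_head entry;
		struct device_attribute *attr;
	};

	/* Create the sysfs file and remember it on the per-device list. */
	static int track_and_create_file(struct device *dev,
					 struct device_attribute *attr,
					 struct list_head *attr_list)
	{
		struct attr_entry *e;
		int ret;

		ret = device_create_file(dev, attr);
		if (ret)
			return ret;

		e = kmalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		e->attr = attr;
		list_add_tail(&e->entry, attr_list);
		return 0;
	}

Teardown then walks the list with list_for_each_entry_safe(), calling device_remove_file() and kfree() per entry, which also turns the partial-failure path in create_groups into a plain call to the same remove routine.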
@@ -47,10 +47,14 @@ enum amdgpu_device_attr_states {
 struct amdgpu_device_attr {
	struct device_attribute		dev_attr;
	enum amdgpu_device_attr_flags	flags;
-	enum amdgpu_device_attr_states	states;
-	int (*attr_update)(struct amdgpu_device *adev,
-			   struct amdgpu_device_attr* attr,
-			   uint32_t mask);
+	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+			   uint32_t mask, enum amdgpu_device_attr_states *states);
+
 };

+struct amdgpu_device_attr_entry {
+	struct list_head	entry;
+	struct amdgpu_device_attr	*attr;
+};
+
 #define to_amdgpu_device_attr(_dev_attr) \
@@ -59,7 +63,6 @@ struct amdgpu_device_attr {
 #define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...)	\
	{ .dev_attr = __ATTR(_name, _mode, _show, _store),		\
	  .flags = _flags,						\
-	  .states = ATTR_STATE_SUPPORTED,				\
	  ##__VA_ARGS__, }

 #define AMDGPU_DEVICE_ATTR(_name, _mode, _flags, ...)			\
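With the new signature, an attr_update callback reports support through the *states out-parameter instead of mutating the shared attribute array. A hypothetical override would look roughly like this (sketch, not a callback from this patch):

	static int my_attr_update(struct amdgpu_device *adev,
				  struct amdgpu_device_attr *attr,
				  uint32_t mask,
				  enum amdgpu_device_attr_states *states)
	{
		/* Hide the file where the feature is absent. */
		if (!(attr->flags & mask) || adev->asic_type == CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;

		return 0;
	}

Keeping the state out of struct amdgpu_device_attr matters because amdgpu_device_attrs[] is a static array shared by all devices: on a multi-GPU system, one device writing attr->states would leak its answer into every other device's init.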
@@ -1552,12 +1552,13 @@ static int psp_load_smu_fw(struct psp_context *psp)
	struct amdgpu_device* adev = psp->adev;
	struct amdgpu_firmware_info *ucode =
			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+	struct amdgpu_ras *ras = psp->ras.ras;

	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
		return 0;


-	if (adev->in_gpu_reset) {
+	if (adev->in_gpu_reset && ras && ras->supported) {
		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
		if (ret) {
			DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -70,9 +70,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)

	switch (adev->asic_type) {
	case CHIP_RAVEN:
-		if (adev->rev_id >= 8)
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
-		else if (adev->pdev->device == 0x15d8)
+		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
@@ -3000,10 +3000,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

-	if (vm->use_cpu_for_update)
+	if (vm->use_cpu_for_update) {
+		/* Sync with last SDMA update/clear before switching to CPU */
+		r = amdgpu_bo_sync_wait(vm->root.base.bo,
+					AMDGPU_FENCE_OWNER_UNDEFINED, true);
+		if (r)
+			goto free_idr;
+
		vm->update_funcs = &amdgpu_vm_cpu_funcs;
-	else
+	} else {
		vm->update_funcs = &amdgpu_vm_sdma_funcs;
+	}
	dma_fence_put(vm->last_update);
	vm->last_update = NULL;
	vm->is_compute_context = true;
@@ -325,9 +325,18 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
 static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
 {
+	char node[10];
+	memset(node, 0, sizeof(node));
+
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
-	sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
-	sysfs_remove_link(hive->kobj, adev->ddev->unique);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);
+
+	if (adev != hive->adev)
+		sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info");
+
+	sprintf(node, "node%d", hive->number_devices);
+	sysfs_remove_link(hive->kobj, node);
+
 }

@@ -583,14 +592,14 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
	if (!hive)
		return -EINVAL;

-	if (!(hive->number_devices--)) {
+	task_barrier_rem_task(&hive->tb);
+	amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
+	mutex_unlock(&hive->hive_lock);
+
+	if(!(--hive->number_devices)){
		amdgpu_xgmi_sysfs_destroy(adev, hive);
		mutex_destroy(&hive->hive_lock);
		mutex_destroy(&hive->reset_lock);
-	} else {
-		task_barrier_rem_task(&hive->tb);
-		amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
-		mutex_unlock(&hive->hive_lock);
	}

	return psp_xgmi_terminate(&adev->psp);
@@ -959,7 +959,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
-		if (adev->rev_id >= 8)
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
@@ -1274,7 +1274,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
	case CHIP_VEGA20:
		break;
	case CHIP_RAVEN:
-		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+		if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+		      (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
		    ((!is_raven_kicker(adev) &&
		      adev->gfx.rlc_fw_version < 531) ||
		     (adev->gfx.rlc_feature_version < 1) ||
@@ -1617,9 +1618,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
-		if (adev->rev_id >= 8)
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
-		else if (adev->pdev->device == 0x15d8)
+		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
@@ -2119,7 +2120,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
-		if (adev->rev_id >= 8)
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
		else
			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
@@ -2968,8 +2969,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
	 */
	if (adev->gfx.rlc.is_rlc_v2_1) {
		if (adev->asic_type == CHIP_VEGA12 ||
-		    (adev->asic_type == CHIP_RAVEN &&
-		     adev->rev_id >= 8))
+		    (adev->apu_flags & AMD_APU_IS_RAVEN2))
			gfx_v9_1_init_rlc_save_restore_list(adev);
		gfx_v9_0_enable_save_restore_machine(adev);
	}
@@ -6881,7 +6881,7 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
		adev->gds.gds_compute_max_wave_id = 0x27f;
		break;
	case CHIP_RAVEN:
-		if (adev->rev_id >= 0x8)
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
		else
			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
@@ -80,7 +80,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
	WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

-	if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the
		 * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
@@ -686,17 +686,23 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
  */
 static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
 {
-	/* Could aper size report 0 ? */
-	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
-	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+	int r;

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
-	adev->gmc.visible_vram_size = adev->gmc.aper_size;

+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_device_resize_fb_bar(adev);
+		if (r)
+			return r;
+	}
+	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+
+	/* In case the PCI BAR is larger than the actual amount of vram */
+	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
@@ -441,9 +441,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)) &&
-		(!(adev->asic_type == CHIP_RAVEN &&
-		   adev->rev_id < 0x8 &&
-		   adev->pdev->device == 0x15d8)));
+		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
+		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
 }

 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
@@ -268,7 +268,6 @@ static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device* adev, int inst)
	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
-		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
@@ -96,7 +96,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

-	if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -548,13 +548,6 @@ static bool nv_need_full_reset(struct amdgpu_device *adev)
	return true;
 }

-static void nv_get_pcie_usage(struct amdgpu_device *adev,
-			      uint64_t *count0,
-			      uint64_t *count1)
-{
-	/*TODO*/
-}
-
 static bool nv_need_reset_on_init(struct amdgpu_device *adev)
 {
 #if 0
@@ -629,7 +622,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
-	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
@@ -55,9 +55,9 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)

	switch (adev->asic_type) {
	case CHIP_RAVEN:
-		if (adev->rev_id >= 0x8)
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
-		else if (adev->pdev->device == 0x15d8)
+		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
@@ -486,7 +486,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
		soc15_program_register_sequence(adev,
						golden_settings_sdma_4_1,
						ARRAY_SIZE(golden_settings_sdma_4_1));
-		if (adev->rev_id >= 8)
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			soc15_program_register_sequence(adev,
							golden_settings_sdma_rv2,
							ARRAY_SIZE(golden_settings_sdma_rv2));
@@ -575,9 +575,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
-		if (adev->rev_id >= 8)
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
-		else if (adev->pdev->device == 0x15d8)
+		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
@@ -564,7 +564,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 static int soc15_asic_reset(struct amdgpu_device *adev)
 {
	/* original raven doesn't have full asic reset */
-	if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
+	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
+	    !(adev->apu_flags & AMD_APU_IS_RAVEN2))
		return 0;

	switch (soc15_asic_reset_method(adev)) {
@@ -1129,16 +1130,23 @@ static int soc15_common_early_init(void *handle)
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
+		if (adev->pdev->device == 0x15dd)
+			adev->apu_flags |= AMD_APU_IS_RAVEN;
+		if (adev->pdev->device == 0x15d8)
+			adev->apu_flags |= AMD_APU_IS_PICASSO;
		if (adev->rev_id >= 0x8)
+			adev->apu_flags |= AMD_APU_IS_RAVEN2;
+
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->external_rev_id = adev->rev_id + 0x79;
-		else if (adev->pdev->device == 0x15d8)
+		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

-		if (adev->rev_id >= 0x8) {
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1156,7 +1164,7 @@ static int soc15_common_early_init(void *handle)
			AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
-		} else if (adev->pdev->device == 0x15d8) {
+		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1222,6 +1230,7 @@ static int soc15_common_early_init(void *handle)
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;
+		adev->apu_flags |= AMD_APU_IS_RENOIR;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
@@ -1453,11 +1453,6 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
			WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
			fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

-			fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
-			WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
-				     RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
-			fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
-
			/* Unstall DPG */
			WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
				 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
@@ -1519,10 +1514,6 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
 {
	struct amdgpu_device *adev = ring->adev;

-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
-			lower_32_bits(ring->wptr) | 0x80000000);
-
	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
@@ -37,7 +37,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
	if (vmid < dev->vm_info.first_vmid_kfd ||
	    vmid > dev->vm_info.last_vmid_kfd)
-		return 0;
+		return false;

	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
@@ -69,7 +69,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,

	/* If there is no valid PASID, it's likely a bug */
	if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
-		return 0;
+		return false;

	/* Interrupt types we care about: various signals and faults.
	 * They will be forwarded to a work queue (see below).
@@ -192,7 +192,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,

	dev_warn_ratelimited(kfd_device,
			"Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
-			PCI_BUS_NUM(pdev->devfn),
+			pdev->bus->number,
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn),
			pasid,
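The one-liner above fixes a misuse of the devfn field: devfn packs only the slot and function numbers, while the bus number lives in pdev->bus. Given a struct pci_dev *pdev, the correct decoding (macros from <linux/pci.h>) is:

	unsigned int bus  = pdev->bus->number;     /* bus number; not derivable from devfn */
	unsigned int slot = PCI_SLOT(pdev->devfn); /* bits 7:3 of devfn */
	unsigned int func = PCI_FUNC(pdev->devfn); /* bits 2:0 of devfn */

PCI_BUS_NUM() extracts the bus byte from a combined bus/devfn device ID, so applying it to a bare devfn printed a wrong (usually zero) bus number in the PPR warning.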
@@ -918,6 +918,23 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
		goto error;
	}

+	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
+		adev->dm.dc->debug.force_single_disp_pipe_split = false;
+		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+	}
+
+	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+
+	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
+		adev->dm.dc->debug.disable_stutter = true;
+
+	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+		adev->dm.dc->debug.disable_dsc = true;
+
+	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
+		adev->dm.dc->debug.disable_clock_gate = true;
+
	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -1521,10 +1538,114 @@ static int dm_hw_fini(void *handle)
	return 0;
 }


+static int dm_enable_vblank(struct drm_crtc *crtc);
+static void dm_disable_vblank(struct drm_crtc *crtc);
+
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+				 struct dc_state *state, bool enable)
+{
+	enum dc_irq_source irq_source;
+	struct amdgpu_crtc *acrtc;
+	int rc = -EBUSY;
+	int i = 0;
+
+	for (i = 0; i < state->stream_count; i++) {
+		acrtc = get_crtc_by_otg_inst(
+				adev, state->stream_status[i].primary_otg_inst);
+
+		if (acrtc && state->stream_status[i].plane_count != 0) {
+			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
+				  acrtc->crtc_id, enable ? "en" : "dis", rc);
+			if (rc)
+				DRM_WARN("Failed to %s pflip interrupts\n",
+					 enable ? "enable" : "disable");
+
+			if (enable) {
+				rc = dm_enable_vblank(&acrtc->base);
+				if (rc)
+					DRM_WARN("Failed to enable vblank interrupts\n");
+			} else {
+				dm_disable_vblank(&acrtc->base);
+			}
+
+		}
+	}
+
+}
+
+enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+	struct dc_state *context = NULL;
+	enum dc_status res = DC_ERROR_UNEXPECTED;
+	int i;
+	struct dc_stream_state *del_streams[MAX_PIPES];
+	int del_streams_count = 0;
+
+	memset(del_streams, 0, sizeof(del_streams));
+
+	context = dc_create_state(dc);
+	if (context == NULL)
+		goto context_alloc_fail;
+
+	dc_resource_state_copy_construct_current(dc, context);
+
+	/* First remove from context all streams */
+	for (i = 0; i < context->stream_count; i++) {
+		struct dc_stream_state *stream = context->streams[i];
+
+		del_streams[del_streams_count++] = stream;
+	}
+
+	/* Remove all planes for removed streams and then remove the streams */
+	for (i = 0; i < del_streams_count; i++) {
+		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+			res = DC_FAIL_DETACH_SURFACES;
+			goto fail;
+		}
+
+		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+		if (res != DC_OK)
+			goto fail;
+	}
+
+
+	res = dc_validate_global_state(dc, context, false);
+
+	if (res != DC_OK) {
+		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
+		goto fail;
+	}
+
+	res = dc_commit_state(dc, context);
+
+fail:
+	dc_release_state(context);
+
+context_alloc_fail:
+	return res;
+}
+
 static int dm_suspend(void *handle)
 {
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
+	int ret = 0;
+
+	if (adev->in_gpu_reset) {
+		mutex_lock(&dm->dc_lock);
+		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+
+		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+
+		amdgpu_dm_commit_zero_streams(dm->dc);
+
+		amdgpu_dm_irq_suspend(adev);
+
+		return ret;
+	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
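Together with the dm_resume() hunk below, this forms a save/replay pair around GPU reset. Condensed into two hypothetical wrappers (the real logic stays inline in dm_suspend()/dm_resume(); this only shows the pairing):

	static void dm_reset_save(struct amdgpu_device *adev,
				  struct amdgpu_display_manager *dm)
	{
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
		amdgpu_dm_commit_zero_streams(dm->dc);
		amdgpu_dm_irq_suspend(adev);
		/* dc_lock stays held across the reset... */
	}

	static void dm_reset_restore(struct amdgpu_device *adev,
				     struct amdgpu_display_manager *dm)
	{
		dc_resume(dm->dc);
		WARN_ON(!dc_commit_state(dm->dc, dm->cached_dc_state));
		dm_gpureset_commit_state(dm->cached_dc_state, dm);
		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;
		mutex_unlock(&dm->dc_lock); /* ...and is released on resume */
	}

Note the asymmetric locking: dm->dc_lock is taken in the suspend half and only released at the end of the resume half, keeping atomic commits out while the cached state is the sole owner of the hardware configuration.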
@@ -1640,6 +1761,46 @@ static void emulated_link_detect(struct dc_link *link)

 }

+static void dm_gpureset_commit_state(struct dc_state *dc_state,
+				     struct amdgpu_display_manager *dm)
+{
+	struct {
+		struct dc_surface_update surface_updates[MAX_SURFACES];
+		struct dc_plane_info plane_infos[MAX_SURFACES];
+		struct dc_scaling_info scaling_infos[MAX_SURFACES];
+		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+		struct dc_stream_update stream_update;
+	} * bundle;
+	int k, m;
+
+	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+	if (!bundle) {
+		dm_error("Failed to allocate update bundle\n");
+		goto cleanup;
+	}
+
+	for (k = 0; k < dc_state->stream_count; k++) {
+		bundle->stream_update.stream = dc_state->streams[k];
+
+		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+			bundle->surface_updates[m].surface =
+				dc_state->stream_status->plane_states[m];
+			bundle->surface_updates[m].surface->force_full_update =
+				true;
+		}
+		dc_commit_updates_for_stream(
+			dm->dc, bundle->surface_updates,
+			dc_state->stream_status->plane_count,
+			dc_state->streams[k], &bundle->stream_update, dc_state);
+	}
+
+cleanup:
+	kfree(bundle);
+
+	return;
+}
+
 static int dm_resume(void *handle)
 {
	struct amdgpu_device *adev = handle;
@@ -1656,8 +1817,44 @@ static int dm_resume(void *handle)
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
-	int i, r;
+	struct dc_state *dc_state;
+	int i, r, j;
+
+	if (adev->in_gpu_reset) {
+		dc_state = dm->cached_dc_state;
+
+		r = dm_dmub_hw_init(adev);
+		if (r)
+			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
+		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+		dc_resume(dm->dc);
+
+		amdgpu_dm_irq_resume_early(adev);
+
+		for (i = 0; i < dc_state->stream_count; i++) {
+			dc_state->streams[i]->mode_changed = true;
+			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
+				dc_state->stream_status->plane_states[j]->update_flags.raw
+					= 0xffffffff;
+			}
+		}
+
+		WARN_ON(!dc_commit_state(dm->dc, dc_state));
+
+		dm_gpureset_commit_state(dm->cached_dc_state, dm);
+
+		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
+
+		dc_release_state(dm->cached_dc_state);
+		dm->cached_dc_state = NULL;
+
+		amdgpu_dm_irq_resume_late(adev);
+
+		mutex_unlock(&dm->dc_lock);
+
+		return 0;
+	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
@@ -3022,9 +3219,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
		goto fail;
	}

-	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
-		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
-
	/* No userspace support. */
	dm->dc->debug.disable_tri_buf = true;

@@ -3651,6 +3845,10 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
+	case DRM_FORMAT_XBGR16161616F:
+	case DRM_FORMAT_ABGR16161616F:
+		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
+		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
@@ -3822,8 +4020,7 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,

 static enum dc_color_depth
 convert_color_depth_from_display_info(const struct drm_connector *connector,
-				      const struct drm_connector_state *state,
-				      bool is_y420)
+				      bool is_y420, int requested_bpc)
 {
	uint8_t bpc;

@@ -3843,10 +4040,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
		bpc = bpc ? bpc : 8;
	}

-	if (!state)
-		state = connector->state;
-
-	if (state) {
+	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
@@ -3855,7 +4049,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
-		bpc = min(bpc, state->max_requested_bpc);
+		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
@@ -3977,7 +4171,8 @@ static void fill_stream_properties_from_drm_display_mode(
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
-	const struct dc_stream_state *old_stream)
+	const struct dc_stream_state *old_stream,
+	int requested_bpc)
 {
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
@@ -4007,8 +4202,9 @@ static void fill_stream_properties_from_drm_display_mode(

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
-		connector, connector_state,
-		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
+		connector,
+		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
+		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

@@ -4214,7 +4410,8 @@ static struct dc_stream_state *
 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
-		       const struct dc_stream_state *old_stream)
+		       const struct dc_stream_state *old_stream,
+		       int requested_bpc)
 {
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
@@ -4299,10 +4496,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
-			&mode, &aconnector->base, con_state, NULL);
+			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
-			&mode, &aconnector->base, con_state, old_stream);
+			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

@@ -4821,16 +5018,54 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
	create_eml_sink(aconnector);
 }

+static struct dc_stream_state *
+create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+				const struct drm_display_mode *drm_mode,
+				const struct dm_connector_state *dm_state,
+				const struct dc_stream_state *old_stream)
+{
+	struct drm_connector *connector = &aconnector->base;
+	struct amdgpu_device *adev = connector->dev->dev_private;
+	struct dc_stream_state *stream;
+	int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+	enum dc_status dc_result = DC_OK;
+
+	do {
+		stream = create_stream_for_sink(aconnector, drm_mode,
+						dm_state, old_stream,
+						requested_bpc);
+		if (stream == NULL) {
+			DRM_ERROR("Failed to create stream for sink!\n");
+			break;
+		}
+
+		dc_result = dc_validate_stream(adev->dm.dc, stream);
+
+		if (dc_result != DC_OK) {
+			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
+				      drm_mode->hdisplay,
+				      drm_mode->vdisplay,
+				      drm_mode->clock,
+				      dc_result);
+
+			dc_stream_release(stream);
+			stream = NULL;
+			requested_bpc -= 2; /* lower bpc to retry validation */
+		}
+
+	} while (stream == NULL && requested_bpc >= 6);
+
+	return stream;
+}
+
 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
 {
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
-	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-	enum dc_status dc_result = DC_OK;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
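create_validate_stream_for_sink() implements a degrade-and-retry policy: build the stream at the connector's requested color depth, and if DC validation rejects it, rebuild at two fewer bits per component until validation passes or the 6 bpc floor is reached. The control flow, stripped of the DRM specifics (build_stream(), stream_ok(), and free_stream() are hypothetical stand-ins):

	do {
		s = build_stream(bpc);          /* hypothetical */
		if (s && !stream_ok(s)) {       /* hypothetical */
			free_stream(s);
			s = NULL;
			bpc -= 2;               /* degrade and retry */
		}
	} while (s == NULL && bpc >= 6);

This lets deep-color or high-bandwidth modes come up at a reduced depth on links that cannot carry them at full depth, instead of rejecting the mode outright.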
@@ -4851,24 +5086,11 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
		goto fail;
	}

-	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
-	if (stream == NULL) {
-		DRM_ERROR("Failed to create stream for sink!\n");
-		goto fail;
-	}
-
-	dc_result = dc_validate_stream(adev->dm.dc, stream);
-
-	if (dc_result == DC_OK)
+	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
+	if (stream) {
+		dc_stream_release(stream);
		result = MODE_OK;
-	else
-		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
-			      mode->hdisplay,
-			      mode->vdisplay,
-			      mode->clock,
-			      dc_result);
-
-	dc_stream_release(stream);
+	}

 fail:
	/* TODO: error handling*/
@@ -5191,10 +5413,12 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
		return 0;

	if (!state->duplicated) {
+		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
-		color_depth = convert_color_depth_from_display_info(connector, conn_state,
-								    is_y420);
+		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
@@ -5566,6 +5790,8 @@ static int get_plane_formats(const struct drm_plane *plane,
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
+			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
+			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

@@ -7622,10 +7848,10 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

-	new_stream = create_stream_for_sink(aconnector,
-					    &new_crtc_state->mode,
-					    dm_new_conn_state,
-					    dm_old_crtc_state->stream);
+	new_stream = create_validate_stream_for_sink(aconnector,
+						     &new_crtc_state->mode,
+						     dm_new_conn_state,
+						     dm_old_crtc_state->stream);

	/*
	 * we can have no stream on ACTION_SET if a display
@@ -7910,13 +8136,6 @@ static int dm_update_plane_state(struct dc *dc,
			return -EINVAL;
		}

-		if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
-		    new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
-			DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
-					 new_plane_state->crtc_x, new_plane_state->crtc_y);
-			return -EINVAL;
-		}
-
		return 0;
	}

@@ -315,6 +315,7 @@ struct amdgpu_display_manager {
#endif

struct drm_atomic_state *cached_state;
struct dc_state *cached_dc_state;

struct dm_comressor_info compressor;

@@ -690,6 +690,26 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
struct dc_debug_options *dbg,
struct dc_state *context)
{
int i;

for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

/**
* Workaround for avoiding pipe-split in cases where we'd split
* planes that are too small, resulting in splits that aren't
* valid for the scaler.
*/
if (pipe->plane_state &&
(pipe->plane_state->dst_rect.width <= 16 ||
pipe->plane_state->dst_rect.height <= 16 ||
pipe->plane_state->src_rect.width <= 16 ||
pipe->plane_state->src_rect.height <= 16)) {
hack_disable_optional_pipe_split(v);
return;
}
}

if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
hack_disable_optional_pipe_split(v);

@@ -702,7 +722,6 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}

unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
{
/* for low power RV2 variants, the highest voltage level we want is 0 */

@@ -3245,6 +3245,10 @@ void core_link_enable_stream(
dp_set_dsc_enable(pipe_ctx, true);

}

if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
core_link_set_avmute(pipe_ctx, false);
}
}

void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
@@ -3257,6 +3261,10 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;

if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
core_link_set_avmute(pipe_ctx, true);
}

#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, true);
#endif

@@ -219,6 +219,30 @@ static enum dpcd_training_patterns
return dpcd_tr_pattern;
}

static uint8_t dc_dp_initialize_scrambling_data_symbols(
struct dc_link *link,
enum dc_dp_training_pattern pattern)
{
uint8_t disable_scrabled_data_symbols = 0;

switch (pattern) {
case DP_TRAINING_PATTERN_SEQUENCE_1:
case DP_TRAINING_PATTERN_SEQUENCE_2:
case DP_TRAINING_PATTERN_SEQUENCE_3:
disable_scrabled_data_symbols = 1;
break;
case DP_TRAINING_PATTERN_SEQUENCE_4:
disable_scrabled_data_symbols = 0;
break;
default:
ASSERT(0);
DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
__func__, pattern);
break;
}
return disable_scrabled_data_symbols;
}

static inline bool is_repeater(struct dc_link *link, uint32_t offset)
{
return (!link->is_lttpr_mode_transparent && offset != 0);
@@ -251,6 +275,9 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);

dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
dc_dp_initialize_scrambling_data_symbols(link, pattern);

dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;

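The new helper mirrors the DP training rule that patterns 1-3 are sent unscrambled while TPS4 keeps scrambling enabled, and its result is folded into the DPCD TRAINING_PATTERN_SET byte. A small sketch of that packing, assuming the spec layout (pattern select in the low bits, SCRAMBLING_DISABLE at bit 5, as in drm's DP_LINK_SCRAMBLING_DISABLE; the driver itself uses a generated register union instead):

#include <stdint.h>
#include <stdio.h>

#define DPCD_TPS_SHIFT          0
#define DPCD_SCRAMBLE_DIS_SHIFT 5

uint8_t pack_training_pattern(uint8_t pattern, uint8_t scramble_disable)
{
    /* combine pattern select and scrambling-disable into one DPCD byte */
    return (uint8_t)((pattern << DPCD_TPS_SHIFT) |
                     (scramble_disable << DPCD_SCRAMBLE_DIS_SHIFT));
}

int main(void)
{
    /* TPS2 with scrambling disabled -> 0x22 */
    printf("0x%02x\n", pack_training_pattern(2, 1));
    return 0;
}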
@@ -532,6 +532,24 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}

int get_num_mpc_splits(struct pipe_ctx *pipe)
{
int mpc_split_count = 0;
struct pipe_ctx *other_pipe = pipe->bottom_pipe;

while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
mpc_split_count++;
other_pipe = other_pipe->bottom_pipe;
}
other_pipe = pipe->top_pipe;
while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
mpc_split_count++;
other_pipe = other_pipe->top_pipe;
}

return mpc_split_count;
}
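get_num_mpc_splits() counts how many neighbouring pipes in the MPC chain still reference the same plane, walking bottom_pipe first and then top_pipe. A simplified sketch of the same counting pattern over a doubly linked chain (the struct is a stand-in for pipe_ctx, not the DC type):

/* Minimal sketch of the split-counting walk above. */
struct pipe {
    const void *plane;
    struct pipe *top, *bottom;
};

int num_splits(struct pipe *p)
{
    int count = 0;
    struct pipe *it;

    for (it = p->bottom; it && it->plane == p->plane; it = it->bottom)
        count++;            /* splits below */
    for (it = p->top; it && it->plane == p->plane; it = it->top)
        count++;            /* splits above */
    return count;           /* 0 = unsplit, 1 = 2-way, 3 = 4-way */
}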

int get_num_odm_splits(struct pipe_ctx *pipe)
{
int odm_split_count = 0;
@@ -556,16 +574,11 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
/*Check for mpc split*/
struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;

*split_count = get_num_mpc_splits(pipe_ctx);
while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
(*split_idx)++;
(*split_count)++;
split_pipe = split_pipe->top_pipe;
}
split_pipe = pipe_ctx->bottom_pipe;
while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
(*split_count)++;
split_pipe = split_pipe->bottom_pipe;
}
} else {
/*Get odm split index*/
struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
@@ -2666,6 +2679,9 @@ bool pipe_need_reprogram(
false == pipe_ctx_old->stream->dpms_off)
return true;

if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
return true;

return false;
}

@@ -411,7 +411,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
.fp16 = true
},

.max_upscale_factor = {

@@ -516,7 +516,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
.fp16 = true
},

.max_upscale_factor = {

@@ -93,7 +93,6 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

/*
* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter

@@ -737,7 +737,8 @@ void dcn10_bios_golden_init(struct dc *dc)
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
if (allow_self_fresh_force_enable == false &&
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}

@@ -1682,12 +1683,81 @@ void dcn10_pipe_control_lock(
hws->funcs.verify_allow_pstate_change_high(dc);
}

/**
* delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
*
* Software keepout workaround to prevent cursor update locking from stalling
* out cursor updates indefinitely or from old values from being retained in
* the case where the viewport changes in the same frame as the cursor.
*
* The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
* too close to VUPDATE, then stall out until VUPDATE finishes.
*
* TODO: Optimize cursor programming to be once per frame before VUPDATE
* to avoid the need for this workaround.
*/
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct crtc_position position;
uint32_t vupdate_start, vupdate_end;
unsigned int lines_to_vupdate, us_to_vupdate, vpos;
unsigned int us_per_line, us_vupdate;

if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
return;

if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
return;

dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
&vupdate_end);

dc->hwss.get_position(&pipe_ctx, 1, &position);
vpos = position.vertical_count;

/* Avoid wraparound calculation issues */
vupdate_start += stream->timing.v_total;
vupdate_end += stream->timing.v_total;
vpos += stream->timing.v_total;

if (vpos <= vupdate_start) {
/* VPOS is in VACTIVE or back porch. */
lines_to_vupdate = vupdate_start - vpos;
} else if (vpos > vupdate_end) {
/* VPOS is in the front porch. */
return;
} else {
/* VPOS is in VUPDATE. */
lines_to_vupdate = 0;
}

/* Calculate time until VUPDATE in microseconds. */
us_per_line =
stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
us_to_vupdate = lines_to_vupdate * us_per_line;

/* 70 us is a conservative estimate of cursor update time*/
if (us_to_vupdate > 70)
return;

/* Stall out until the cursor update completes. */
if (vupdate_end < vupdate_start)
vupdate_end += stream->timing.v_total;
us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
udelay(us_to_vupdate + us_vupdate);
}
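A worked example of the keepout arithmetic above, assuming a hypothetical 1080p60 CEA timing (h_total = 2200, pixel clock 148.5 MHz, so pix_clk_100hz = 1485000): us_per_line = 2200 * 10000 / 1485000, roughly 14 us, so a cursor position 4 lines ahead of VUPDATE is about 56 us away, inside the 70 us keepout, and the driver stalls through the VUPDATE window. The same math in a standalone check:

#include <stdio.h>

int main(void)
{
    /* hypothetical 1080p60 timing, 100 Hz clock units as in DC */
    unsigned int h_total = 2200;
    unsigned int pix_clk_100hz = 1485000;
    unsigned int us_per_line = h_total * 10000u / pix_clk_100hz; /* ~14 us */
    unsigned int lines_to_vupdate = 4;
    unsigned int us_to_vupdate = lines_to_vupdate * us_per_line; /* ~56 us */

    /* within the 70 us keepout, so the driver would stall here */
    printf("us_per_line=%u us_to_vupdate=%u stall=%s\n",
           us_per_line, us_to_vupdate, us_to_vupdate <= 70 ? "yes" : "no");
    return 0;
}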

void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
if (!pipe || pipe->top_pipe)
return;

/* Prevent cursor lock from stalling out cursor updates. */
if (lock)
delay_cursor_until_vupdate(dc, pipe);

dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
pipe->stream_res.opp->inst, lock);
}
@@ -3301,7 +3371,7 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}

static void dcn10_calc_vupdate_position(
void dcn10_calc_vupdate_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,

@@ -34,6 +34,11 @@ struct dc;
void dcn10_hw_sequencer_construct(struct dc *dc);

int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
void dcn10_calc_vupdate_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
uint32_t *end_line);
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
enum dc_status dcn10_enable_stream_timing(
struct pipe_ctx *pipe_ctx,

@@ -72,6 +72,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};

@@ -80,6 +80,7 @@ struct dcn20_hubbub {
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
struct dcn_watermark_set watermarks;
int num_vmid;
struct dcn20_vmid vmid[16];
unsigned int detile_buf_size;
};

@@ -83,6 +83,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.init_vm_ctx = dcn20_init_vm_ctx,
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};

@@ -1663,22 +1663,32 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
}

static void acquire_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
void dcn20_acquire_dsc(const struct dc *dc,
struct resource_context *res_ctx,
struct display_stream_compressor **dsc,
int pipe_idx)
{
int i;
const struct resource_pool *pool = dc->res_pool;
struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;

ASSERT(*dsc == NULL);
ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
*dsc = NULL;

/* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
*dsc = pool->dscs[pipe_idx];
res_ctx->is_dsc_acquired[pipe_idx] = true;
return;
}

/* Return old DSC to avoid the need for re-programming */
if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
*dsc = dsc_old;
res_ctx->is_dsc_acquired[dsc_old->inst] = true;
return;
}

/* Find first free DSC */
for (i = 0; i < pool->res_cap->num_dsc; i++)
if (!res_ctx->is_dsc_acquired[i]) {
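dcn20_acquire_dsc() now takes the dc so it can consult the current state and hand back the DSC instance the pipe already owned, avoiding re-programming across a validation cycle; only if that instance is taken does it fall back to the first-free scan. A minimal sketch of the prefer-previous allocation policy (the bitmap, size, and names are stand-ins):

#include <stdbool.h>

#define NUM_RES 6

/* prev: resource index this consumer held before (-1 if none) */
int acquire_pref_old(bool acquired[NUM_RES], int prev)
{
    int i;

    if (prev >= 0 && !acquired[prev]) {
        acquired[prev] = true;   /* reuse -> no re-programming */
        return prev;
    }
    for (i = 0; i < NUM_RES; i++)
        if (!acquired[i]) {
            acquired[i] = true;  /* fall back to first free */
            return i;
        }
    return -1;                   /* none available */
}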
@@ -1710,7 +1720,6 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
{
enum dc_status result = DC_OK;
int i;
const struct resource_pool *pool = dc->res_pool;

/* Get a DSC if required and available */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1722,7 +1731,7 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
if (pipe_ctx->stream_res.dsc)
continue;

acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i);

/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
@@ -1850,12 +1859,13 @@ static void swizzle_to_dml_params(
}

bool dcn20_split_stream_for_odm(
const struct dc *dc,
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
struct pipe_ctx *next_odm_pipe)
{
int pipe_idx = next_odm_pipe->pipe_idx;
const struct resource_pool *pool = dc->res_pool;

*next_odm_pipe = *prev_odm_pipe;

@@ -1913,7 +1923,7 @@ bool dcn20_split_stream_for_odm(
}
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
if (next_odm_pipe->stream->timing.flags.DSC == 1) {
acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
return false;
@@ -2576,27 +2586,6 @@ static void dcn20_merge_pipes_for_validate(
}
}

int dcn20_find_previous_split_count(struct pipe_ctx *pipe)
{
int previous_split = 1;
struct pipe_ctx *current_pipe = pipe;

while (current_pipe->bottom_pipe) {
if (current_pipe->plane_state != current_pipe->bottom_pipe->plane_state)
break;
previous_split++;
current_pipe = current_pipe->bottom_pipe;
}
current_pipe = pipe;
while (current_pipe->top_pipe) {
if (current_pipe->plane_state != current_pipe->top_pipe->plane_state)
break;
previous_split++;
current_pipe = current_pipe->top_pipe;
}
return previous_split;
}

int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
@@ -2608,6 +2597,8 @@ int dcn20_validate_apply_pipe_split_flags(
int plane_count = 0;
bool force_split = false;
bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
struct vba_vars_st *v = &context->bw_ctx.dml.vba;
int max_mpc_comb = v->maxMpcComb;

if (context->stream_count > 1) {
if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
@@ -2615,10 +2606,22 @@ int dcn20_validate_apply_pipe_split_flags(
} else if (dc->debug.force_single_disp_pipe_split)
force_split = true;

/* TODO: fix dc bugs and remove this split threshold thing */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

/**
* Workaround for avoiding pipe-split in cases where we'd split
* planes that are too small, resulting in splits that aren't
* valid for the scaler.
*/
if (pipe->plane_state &&
(pipe->plane_state->dst_rect.width <= 16 ||
pipe->plane_state->dst_rect.height <= 16 ||
pipe->plane_state->src_rect.width <= 16 ||
pipe->plane_state->src_rect.height <= 16))
avoid_split = true;

/* TODO: fix dc bugs and remove this split threshold thing */
if (pipe->stream && !pipe->prev_odm_pipe &&
(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
++plane_count;
@@ -2628,15 +2631,13 @@ int dcn20_validate_apply_pipe_split_flags(

/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
if (avoid_split) {
int max_mpc_comb = context->bw_ctx.dml.vba.maxMpcComb;

for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;

for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1 &&
context->bw_ctx.dml.vba.ModeSupport[vlevel][0])
if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
v->ModeSupport[vlevel][0])
break;
/* Impossible to not split this pipe */
if (vlevel > context->bw_ctx.dml.soc.num_states)
@@ -2645,21 +2646,21 @@ int dcn20_validate_apply_pipe_split_flags(
max_mpc_comb = 0;
pipe_idx++;
}
context->bw_ctx.dml.vba.maxMpcComb = max_mpc_comb;
v->maxMpcComb = max_mpc_comb;
}

/* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];
int pipe_plane = v->pipe_plane[pipe_idx];
bool split4mpc = context->stream_count == 1 && plane_count == 1
&& dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;

if (!context->res_ctx.pipe_ctx[i].stream)
continue;

if (force_split
|| context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1) {
if (context->stream_count == 1 && plane_count == 1
&& dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4)
if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] > 1) {
if (split4mpc)
split[i] = 4;
else
split[i] = 2;
@@ -2675,66 +2676,72 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = 2;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = 2;
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];
v->ODMCombineEnabled[pipe_plane] =
v->ODMCombineEnablePerState[vlevel][pipe_plane];

if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
/*Already split odm pipe tree, don't try to split again*/
split[i] = 0;
split[pipe->prev_odm_pipe->pipe_idx] = 0;
} else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
/*If 2 way split but can support 4 way split, then split each pipe again*/
if (context->stream_count == 1 && plane_count == 1
&& dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4) {
split[i] = 2;
} else {
if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
if (get_num_mpc_splits(pipe) == 1) {
/*If need split for mpc but 2 way split already*/
if (split[i] == 4)
split[i] = 2; /* 2 -> 4 MPC */
else if (split[i] == 2)
split[i] = 0; /* 2 -> 2 MPC */
else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
merge[i] = true; /* 2 -> 1 MPC */
} else if (get_num_mpc_splits(pipe) == 3) {
/*If need split for mpc but 4 way split already*/
if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
|| !pipe->bottom_pipe)) {
merge[i] = true; /* 4 -> 2 MPC */
} else if (split[i] == 0 && pipe->top_pipe &&
pipe->top_pipe->plane_state == pipe->plane_state)
merge[i] = true; /* 4 -> 1 MPC */
split[i] = 0;
split[pipe->top_pipe->pipe_idx] = 0;
}
} else if (pipe->prev_odm_pipe || (dcn20_find_previous_split_count(pipe) == 2 && pipe->top_pipe)) {
if (split[i] == 0) {
/*Exiting mpc/odm combine*/
merge[i] = true;
} else {
/*Transition from mpc combine to odm combine or vice versa*/
ASSERT(0); /*should not actually happen yet*/
split[i] = 2;
merge[i] = true;
} else if (get_num_odm_splits(pipe)) {
/* ODM -> MPC transition */
ASSERT(0); /* NOT expected yet */
if (pipe->prev_odm_pipe) {
split[pipe->prev_odm_pipe->pipe_idx] = 2;
merge[pipe->prev_odm_pipe->pipe_idx] = true;
} else {
split[pipe->top_pipe->pipe_idx] = 2;
merge[pipe->top_pipe->pipe_idx] = true;
split[i] = 0;
merge[i] = true;
}
}
} else if (dcn20_find_previous_split_count(pipe) == 3) {
if (split[i] == 0 && !pipe->top_pipe) {
merge[pipe->bottom_pipe->pipe_idx] = true;
merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
} else if (split[i] == 2 && !pipe->top_pipe) {
merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
split[i] = 0;
}
} else if (dcn20_find_previous_split_count(pipe) == 4) {
if (split[i] == 0 && !pipe->top_pipe) {
merge[pipe->bottom_pipe->pipe_idx] = true;
merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
merge[pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
} else if (split[i] == 2 && !pipe->top_pipe) {
merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
merge[pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
} else {
if (get_num_odm_splits(pipe) == 1) {
/*If need split for odm but 2 way split already*/
if (split[i] == 4)
split[i] = 2; /* 2 -> 4 ODM */
else if (split[i] == 2)
split[i] = 0; /* 2 -> 2 ODM */
else if (pipe->prev_odm_pipe) {
ASSERT(0); /* NOT expected yet */
merge[i] = true; /* exit ODM */
}
} else if (get_num_odm_splits(pipe) == 3) {
/*If need split for odm but 4 way split already*/
if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
|| !pipe->next_odm_pipe)) {
ASSERT(0); /* NOT expected yet */
merge[i] = true; /* 4 -> 2 ODM */
} else if (split[i] == 0 && pipe->prev_odm_pipe) {
ASSERT(0); /* NOT expected yet */
merge[i] = true; /* exit ODM */
}
split[i] = 0;
} else if (get_num_mpc_splits(pipe)) {
/* MPC -> ODM transition */
ASSERT(0); /* NOT expected yet */
if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
split[i] = 0;
merge[i] = true;
}
}
}

/* Adjust dppclk when split is forced, do not bother with dispclk */
if (split[i] != 0
&& context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
pipe_idx++;
}

@@ -2792,7 +2799,7 @@ bool dcn20_fast_validate_bw(
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
if (!dcn20_split_stream_for_odm(
&context->res_ctx, dc->res_pool,
dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
@@ -2821,7 +2828,7 @@ bool dcn20_fast_validate_bw(
}
if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
&context->res_ctx, dc->res_pool,
dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
dcn20_build_mapped_resource(dc, context, pipe->stream);

|
||||
display_e2e_pipe_params_st *pipes,
|
||||
int pipe_cnt);
|
||||
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
|
||||
int dcn20_find_previous_split_count(struct pipe_ctx *pipe);
|
||||
int dcn20_validate_apply_pipe_split_flags(
|
||||
struct dc *dc,
|
||||
struct dc_state *context,
|
||||
@ -136,10 +135,14 @@ void dcn20_split_stream_for_mpc(
|
||||
struct pipe_ctx *primary_pipe,
|
||||
struct pipe_ctx *secondary_pipe);
|
||||
bool dcn20_split_stream_for_odm(
|
||||
const struct dc *dc,
|
||||
struct resource_context *res_ctx,
|
||||
const struct resource_pool *pool,
|
||||
struct pipe_ctx *prev_odm_pipe,
|
||||
struct pipe_ctx *next_odm_pipe);
|
||||
void dcn20_acquire_dsc(const struct dc *dc,
|
||||
struct resource_context *res_ctx,
|
||||
struct display_stream_compressor **dsc,
|
||||
int pipe_idx);
|
||||
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
|
||||
struct resource_context *res_ctx,
|
||||
const struct resource_pool *pool,
|
||||
|
@@ -49,11 +49,6 @@
#define FN(reg_name, field_name) \
hubbub1->shifts->field_name, hubbub1->masks->field_name

#ifdef NUM_VMID
#undef NUM_VMID
#endif
#define NUM_VMID 16

static uint32_t convert_and_clamp(
uint32_t wm_ns,
uint32_t refclk_mhz,
@@ -138,7 +133,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,

dcn21_dchvm_init(hubbub);

return NUM_VMID;
return hubbub1->num_vmid;
}

bool hubbub21_program_urgent_watermarks(

@@ -86,6 +86,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.power_down = dce110_power_down,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,

@@ -805,7 +805,7 @@ static const struct resource_caps res_cap_rn = {
.num_pll = 5, // maybe 3 because the last two used for USB-c
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 1,
.num_vmid = 16,
.num_dsc = 3,
};

@@ -1295,6 +1295,7 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx)
vmid->shifts = &vmid_shifts;
vmid->masks = &vmid_masks;
}
hubbub->num_vmid = res_cap_rn.num_vmid;

return &hubbub->base;
}
@@ -63,10 +63,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags)

DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
dml_common_defs.o

ifdef CONFIG_DRM_AMD_DC_DCN
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o

@@ -26,7 +26,6 @@
#ifndef __DML20_DISPLAY_RQ_DLG_CALC_H__
#define __DML20_DISPLAY_RQ_DLG_CALC_H__

#include "../dml_common_defs.h"
#include "../display_rq_dlg_helpers.h"

struct display_mode_lib;

@@ -26,7 +26,6 @@
#ifndef __DML20V2_DISPLAY_RQ_DLG_CALC_H__
#define __DML20V2_DISPLAY_RQ_DLG_CALC_H__

#include "../dml_common_defs.h"
#include "../display_rq_dlg_helpers.h"

struct display_mode_lib;

@@ -26,7 +26,7 @@
#ifndef __DML21_DISPLAY_RQ_DLG_CALC_H__
#define __DML21_DISPLAY_RQ_DLG_CALC_H__

#include "../dml_common_defs.h"
#include "dm_services.h"
#include "../display_rq_dlg_helpers.h"

struct display_mode_lib;

@@ -25,8 +25,10 @@
#ifndef __DISPLAY_MODE_LIB_H__
#define __DISPLAY_MODE_LIB_H__

#include "dml_common_defs.h"
#include "dm_services.h"
#include "dc_features.h"
#include "display_mode_structs.h"
#include "display_mode_enums.h"
#include "display_mode_vba.h"

enum dml_project {

@@ -27,8 +27,6 @@
#ifndef __DML2_DISPLAY_MODE_VBA_H__
#define __DML2_DISPLAY_MODE_VBA_H__

#include "dml_common_defs.h"

struct display_mode_lib;

void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib);

@@ -26,7 +26,6 @@
#ifndef __DISPLAY_RQ_DLG_HELPERS_H__
#define __DISPLAY_RQ_DLG_HELPERS_H__

#include "dml_common_defs.h"
#include "display_mode_lib.h"

/* Function: Printer functions

@@ -26,8 +26,6 @@
#ifndef __DISPLAY_RQ_DLG_CALC_H__
#define __DISPLAY_RQ_DLG_CALC_H__

#include "dml_common_defs.h"

struct display_mode_lib;

#include "display_rq_dlg_helpers.h"

@@ -1,43 +0,0 @@
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

#include "dml_common_defs.h"
#include "dcn_calc_math.h"

#include "dml_inline_defs.h"

double dml_round(double a)
{
double round_pt = 0.5;
double ceil = dml_ceil(a, 1);
double floor = dml_floor(a, 1);

if (a - floor >= round_pt)
return ceil;
else
return floor;
}

@@ -1,37 +0,0 @@
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

#ifndef __DC_COMMON_DEFS_H__
#define __DC_COMMON_DEFS_H__

#include "dm_services.h"
#include "dc_features.h"
#include "display_mode_structs.h"
#include "display_mode_enums.h"

double dml_round(double a);

#endif /* __DC_COMMON_DEFS_H__ */
@@ -26,7 +26,6 @@
#ifndef __DML_INLINE_DEFS_H__
#define __DML_INLINE_DEFS_H__

#include "dml_common_defs.h"
#include "dcn_calc_math.h"
#include "dml_logger.h"

@@ -75,6 +74,18 @@ static inline double dml_floor(double a, double granularity)
return (double) dcn_bw_floor2(a, granularity);
}

static inline double dml_round(double a)
{
double round_pt = 0.5;
double ceil = dml_ceil(a, 1);
double floor = dml_floor(a, 1);

if (a - floor >= round_pt)
return ceil;
else
return floor;
}
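With dml_common_defs.c removed, dml_round() moves here as an inline. It rounds half up by comparing the fractional part against 0.5: dml_round(2.4) = 2, dml_round(2.5) = 3, and dml_round(-2.5) = -2, since the fraction relative to floor(-2.5) = -3 is exactly 0.5. A quick userspace check of the same formula, using libm ceil/floor in place of the dml helpers:

#include <math.h>
#include <stdio.h>

/* same round-half-up logic as dml_round() */
double round_half_up(double a)
{
    return (a - floor(a) >= 0.5) ? ceil(a) : floor(a);
}

int main(void)
{
    printf("%g %g %g\n",
           round_half_up(2.4),   /* 2 */
           round_half_up(2.5),   /* 3 */
           round_half_up(-2.5)); /* -2 */
    return 0;
}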

static inline int dml_log2(double x)
{
return dml_round((double)dcn_bw_log(x, 2));
@@ -112,7 +123,7 @@ static inline double dml_log(double x, double base)

static inline unsigned int dml_round_to_multiple(unsigned int num,
unsigned int multiple,
bool up)
unsigned char up)
{
unsigned int remainder;

@@ -96,6 +96,11 @@ struct hw_sequencer_funcs {
void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
struct crtc_position *position);
int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx);
void (*calc_vupdate_position)(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
uint32_t *end_line);
void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
int group_size, struct pipe_ctx *grouped_pipes[]);
void (*enable_timing_synchronization)(struct dc *dc,

@@ -177,6 +177,8 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);

int get_num_mpc_splits(struct pipe_ctx *pipe);

int get_num_odm_splits(struct pipe_ctx *pipe);

#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
@@ -112,9 +112,12 @@ uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
evict_vmids(core_vmid);

vmid = get_next_available_vmid(core_vmid);
add_ptb_to_table(core_vmid, vmid, ptb);
if (vmid != -1) {
add_ptb_to_table(core_vmid, vmid, ptb);

dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
} else
ASSERT(0);
}

return vmid;
@@ -40,6 +40,13 @@ enum amd_chip_flags {
AMD_EXP_HW_SUPPORT = 0x00080000UL,
};

enum amd_apu_flags {
AMD_APU_IS_RAVEN = 0x00000001UL,
AMD_APU_IS_RAVEN2 = 0x00000002UL,
AMD_APU_IS_PICASSO = 0x00000004UL,
AMD_APU_IS_RENOIR = 0x00000008UL,
};

enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_COMMON,
AMD_IP_BLOCK_TYPE_GMC,
@@ -150,6 +157,13 @@ enum DC_FEATURE_MASK {
DC_PSR_MASK = 0x8,
};

enum DC_DEBUG_MASK {
DC_DISABLE_PIPE_SPLIT = 0x1,
DC_DISABLE_STUTTER = 0x2,
DC_DISABLE_DSC = 0x4,
DC_DISABLE_CLOCK_GATING = 0x8
};

enum amd_dpm_forced_level;
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks

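The new apu_flags bits classify the APU variant once at init so later code can test a mask instead of repeating rev_id and PCI device-ID checks; the smu10 hunks later in this diff are converted exactly this way. A sketch of the classify-once, test-anywhere idiom; the thresholds shown are taken from the checks this commit removes, but how the real driver combines the bits is not shown here:

#include <stdbool.h>
#include <stdint.h>

/* values mirror enum amd_apu_flags; the device struct is a stand-in */
#define APU_IS_RAVEN   0x1UL
#define APU_IS_RAVEN2  0x2UL
#define APU_IS_PICASSO 0x4UL

struct dev { unsigned long apu_flags; uint32_t rev_id; uint16_t pci_device; };

void classify_once(struct dev *d)
{
    /* mirrors the removed call-site checks; whether the real driver
     * also sets the base RAVEN bit for Raven2/Picasso is assumed here */
    if (d->rev_id >= 8)
        d->apu_flags |= APU_IS_RAVEN2;
    else if (d->pci_device == 0x15d8)
        d->apu_flags |= APU_IS_PICASSO;
    else
        d->apu_flags |= APU_IS_RAVEN;
}

bool is_raven2(const struct dev *d)
{
    return d->apu_flags & APU_IS_RAVEN2;  /* cheap test anywhere */
}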
@@ -623,6 +623,9 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct arcturus_dpm_table *dpm_table = NULL;

if (amdgpu_ras_intr_triggered())
return snprintf(buf, PAGE_SIZE, "unavailable\n");

dpm_table = smu_dpm->dpm_context;

switch (type) {
@@ -998,6 +1001,9 @@ static int arcturus_read_sensor(struct smu_context *smu,
PPTable_t *pptable = table_context->driver_pptable;
int ret = 0;

if (amdgpu_ras_intr_triggered())
return 0;

if (!data || !size)
return -EINVAL;

@@ -410,12 +410,10 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table **pptable,
uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
uint32_t table_size, i;
uint32_t i;
struct smu10_voltage_dependency_table *ptable;

table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
ptable = kzalloc(table_size, GFP_KERNEL);

ptable = kzalloc(struct_size(ptable, entries, num_entry), GFP_KERNEL);
if (NULL == ptable)
return -ENOMEM;

@@ -1304,8 +1302,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
if ((adev->asic_type == CHIP_RAVEN) &&
(adev->rev_id != 0x15d8) &&
if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
(hwmgr->smu_version >= 0x41e2b))
return true;
else

@@ -192,7 +192,7 @@ struct smu10_clock_voltage_dependency_record {

struct smu10_voltage_dependency_table {
uint32_t count;
struct smu10_clock_voltage_dependency_record entries[1];
struct smu10_clock_voltage_dependency_record entries[];
};

struct smu10_clock_voltage_information {

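The smu10 hunks above replace the old one-element-array sizing, which added sizeof(uint32_t) and multiplied the whole struct size per entry, with a C99 flexible array member sized through the kernel's overflow-checking struct_size() macro. A userspace sketch of the equivalent sizing math, without the overflow check:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dep_record { uint32_t clk, vol; };

struct dep_table {
    uint32_t count;
    struct dep_record entries[];   /* flexible array member */
};

int main(void)
{
    uint32_t n = 8;
    /* kernel's struct_size(ptable, entries, n) computes this sum
     * with overflow checking; the plain form is shown here */
    struct dep_table *t = calloc(1, sizeof(*t) + n * sizeof(t->entries[0]));

    if (!t)
        return 1;
    t->count = n;
    printf("allocated %zu bytes\n", sizeof(*t) + n * sizeof(t->entries[0]));
    free(t);
    return 0;
}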
@@ -597,58 +597,40 @@ int phm_irq_process(struct amdgpu_device *adev,

if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
/*
* SW CTF just occurred.
* Try to do a graceful shutdown to prevent further damage.
*/
dev_emerg(adev->dev, "System is going to shutdown due to SW CTF!\n");
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
orderly_poweroff(true);
} else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
/*
* HW CTF just occurred. Shutdown to prevent further damage.
*/
dev_emerg(adev->dev, "System is going to shutdown due to HW CTF!\n");
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
}
} else if (client_id == SOC15_IH_CLIENTID_THM) {
if (src_id == 0) {
pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
/*
* SW CTF just occurred.
* Try to do a graceful shutdown to prevent further damage.
*/
dev_emerg(adev->dev, "System is going to shutdown due to SW CTF!\n");
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
orderly_poweroff(true);
} else
pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
/*
* HW CTF just occurred. Shutdown to prevent further damage.
*/
dev_emerg(adev->dev, "System is going to shutdown due to HW CTF!\n");
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
}

@@ -1565,40 +1565,28 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
/*
* SW CTF just occurred.
* Try to do a graceful shutdown to prevent further damage.
*/
dev_emerg(adev->dev, "System is going to shutdown due to SW CTF!\n");
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
orderly_poweroff(true);
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
break;
default:
pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
src_id,
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
src_id);
break;
}
} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
PCI_BUS_NUM(adev->pdev->devfn),
PCI_SLOT(adev->pdev->devfn),
PCI_FUNC(adev->pdev->devfn));
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
/*
* HW CTF just occurred. Shutdown to prevent further damage.
*/
dev_emerg(adev->dev, "System is going to shutdown due to HW CTF!\n");
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
if (src_id == 0xfe) {

@@ -226,7 +226,8 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
adev->pm.fw_version = hwmgr->smu_version >> 8;

if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 &&
if (!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
(adev->apu_flags & AMD_APU_IS_RAVEN) &&
adev->pm.fw_version < 0x1e45)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;