mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-18 08:46:40 +07:00

commit 60845e34f0

Merge tag 'drm-next-5.5-2019-10-25' of git://people.freedesktop.org/~agd5f/linux into drm-next

drm-next-5.5-2019-10-25:

amdgpu:
- BACO support for CI and VI asics
- Quick memory training support for navi
- MSI-X support
- RAS fixes
- Display AVI infoframe fixes
- Display ref clock fixes for renoir
- Fix number of audio endpoints in renoir
- Fix for discovery tables
- Powerplay fixes
- Documentation fixes
- Misc cleanups

radeon:
- revert a PPC fix which broke x86

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191025221020.203546-1-alexander.deucher@amd.com
@@ -150,6 +150,7 @@ extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern uint amdgpu_force_long_training;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;

@@ -288,6 +289,9 @@ struct amdgpu_ip_block_version {
    const struct amd_ip_funcs *funcs;
};

#define HW_REV(_Major, _Minor, _Rev) \
    ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))

struct amdgpu_ip_block {
    struct amdgpu_ip_block_status status;
    const struct amdgpu_ip_block_version *version;
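For reference, HW_REV packs the major/minor/revision triple into a single uint32_t so an IP version can be compared or switched on as one value, as gddr6_mem_train_support() does in a later hunk. A minimal sketch (values illustrative, not part of the commit):

    uint32_t hw_v  = HW_REV(11, 0, 5);    /* == 0x000b0005 */
    uint32_t major = hw_v >> 16;          /* 11 */
    uint32_t minor = (hw_v >> 8) & 0xff;  /* 0  */
    uint32_t rev   = hw_v & 0xff;         /* 5  */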
@@ -627,6 +631,11 @@ struct amdgpu_fw_vram_usage {
    u64 size;
    struct amdgpu_bo *reserved_bo;
    void *va;

    /* Offset on the top of VRAM, used as c2p write buffer.
     */
    u64 mem_train_fb_loc;
    bool mem_train_support;
};

/*

@@ -759,6 +768,7 @@ struct amdgpu_device {
    uint8_t *bios;
    uint32_t bios_size;
    struct amdgpu_bo *stolen_vga_memory;
    struct amdgpu_bo *discovery_memory;
    uint32_t bios_scratch_reg_offset;
    uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

@@ -959,8 +969,6 @@ struct amdgpu_device {
    int asic_reset_res;
    struct work_struct xgmi_reset_work;

    bool in_baco_reset;

    long gfx_timeout;
    long sdma_timeout;
    long video_timeout;

@@ -982,6 +990,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               uint32_t *buf, size_t size, bool write);
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
@@ -2038,6 +2038,11 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
    if (adev->is_atom_fw) {
        amdgpu_atomfirmware_scratch_regs_init(adev);
        amdgpu_atomfirmware_allocate_fb_scratch(adev);
        ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
        if (ret) {
            DRM_ERROR("Failed to get mem train fb location.\n");
            return ret;
        }
    } else {
        amdgpu_atombios_scratch_regs_init(adev);
        amdgpu_atombios_allocate_fb_scratch(adev);
@@ -27,6 +27,7 @@
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{

@@ -462,3 +463,138 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
    }
    return -EINVAL;
}

/*
 * Check if VBIOS supports GDDR6 training data save/restore
 */
static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
{
    uint16_t data_offset;
    int index;

    index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                        firmwareinfo);
    if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
                                      NULL, NULL, &data_offset)) {
        struct atom_firmware_info_v3_1 *firmware_info =
            (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
                                               data_offset);

        DRM_DEBUG("atom firmware capability:0x%08x.\n",
                  le32_to_cpu(firmware_info->firmware_capability));

        if (le32_to_cpu(firmware_info->firmware_capability) &
            ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
            return true;
    }

    return false;
}

static int gddr6_mem_train_support(struct amdgpu_device *adev)
{
    int ret;
    uint32_t major, minor, revision, hw_v;

    if (gddr6_mem_train_vbios_support(adev)) {
        amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
        hw_v = HW_REV(major, minor, revision);
        /*
         * treat 0 revision as a special case since register for MP0 and MMHUB is missing
         * for some Navi10 A0, preventing driver from discovering the hwip information since
         * none of the functions will be initialized, it should not cause any problems
         */
        switch (hw_v) {
        case HW_REV(11, 0, 0):
        case HW_REV(11, 0, 5):
            ret = 1;
            break;
        default:
            DRM_ERROR("memory training vbios supports but psp hw(%08x)"
                      " doesn't support!\n", hw_v);
            ret = -1;
            break;
        }
    } else {
        ret = 0;
        hw_v = -1;
    }

    DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
    return ret;
}

int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
{
    struct atom_context *ctx = adev->mode_info.atom_context;
    unsigned char *bios = ctx->bios;
    struct vram_reserve_block *reserved_block;
    int index, block_number;
    uint8_t frev, crev;
    uint16_t data_offset, size;
    uint32_t start_address_in_kb;
    uint64_t offset;
    int ret;

    adev->fw_vram_usage.mem_train_support = false;

    if (adev->asic_type != CHIP_NAVI10 &&
        adev->asic_type != CHIP_NAVI14)
        return 0;

    if (amdgpu_sriov_vf(adev))
        return 0;

    ret = gddr6_mem_train_support(adev);
    if (ret == -1)
        return -EINVAL;
    else if (ret == 0)
        return 0;

    index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                        vram_usagebyfirmware);
    ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
                                        &data_offset);
    if (ret == 0) {
        DRM_ERROR("parse data header failed.\n");
        return -EINVAL;
    }

    DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
              " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);
    /* only support 2.1+ */
    if (((uint16_t)frev << 8 | crev) < 0x0201) {
        DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev);
        return -EINVAL;
    }

    reserved_block = (struct vram_reserve_block *)
        (bios + data_offset + sizeof(struct atom_common_table_header));
    block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
        / sizeof(struct vram_reserve_block);
    reserved_block += (block_number > 0) ? block_number - 1 : 0;
    DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
              block_number,
              le32_to_cpu(reserved_block->start_address_in_kb),
              le16_to_cpu(reserved_block->used_by_firmware_in_kb),
              le16_to_cpu(reserved_block->used_by_driver_in_kb));
    if (reserved_block->used_by_firmware_in_kb > 0) {
        start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
        offset = (uint64_t)start_address_in_kb * ONE_KiB;
        if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1)) {
            offset -= ONE_MiB;
        }

        offset &= ~(ONE_MiB - 1);
        adev->fw_vram_usage.mem_train_fb_loc = offset;
        adev->fw_vram_usage.mem_train_support = true;
        DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
        ret = 0;
    } else {
        DRM_ERROR("used_by_firmware_in_kb is 0!\n");
        ret = -EINVAL;
    }

    return ret;
}
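The rounding at the end of amdgpu_atomfirmware_get_mem_train_fb_loc() drops the training-buffer offset to a 1 MiB boundary, stepping back a full MiB first whenever the firmware-reserved block starts within 4 KiB above such a boundary. A worked example with a hypothetical table value:

    /* start_address_in_kb = 0x3001 (hypothetical) */
    offset = (uint64_t)0x3001 * ONE_KiB;   /* 0xC00400 */
    /* 0xC00400 & (ONE_MiB - 1) == 0x400, which is < 4 * ONE_KiB + 1 */
    offset -= ONE_MiB;                     /* 0xB00400 */
    offset &= ~(ONE_MiB - 1);              /* 0xB00000, the final mem_train_fb_loc */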
@@ -31,6 +31,7 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
                                      int *vram_width, int *vram_type, int *vram_vendor);
int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
@@ -613,17 +613,7 @@ static bool amdgpu_atpx_detect(void)
    bool d3_supported = false;
    struct pci_dev *parent_pdev;

    while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
        vga_count++;

        has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);

        parent_pdev = pci_upstream_bridge(pdev);
        d3_supported |= parent_pdev && parent_pdev->bridge_d3;
        amdgpu_atpx_get_quirks(pdev);
    }

    while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
    while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
        vga_count++;

        has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
@@ -140,7 +140,12 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
    return 0;

error_free:
    while (i--) {
    for (i = 0; i < last_entry; ++i) {
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

        amdgpu_bo_unref(&bo);
    }
    for (i = first_userptr; i < num_entries; ++i) {
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

        amdgpu_bo_unref(&bo);
@@ -474,7 +474,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,

    list_for_each_entry(lobj, validated, tv.head) {
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
        bool binding_userptr = false;
        struct mm_struct *usermm;

        usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);

@@ -491,17 +490,14 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,

            amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                         lobj->user_pages);
            binding_userptr = true;
        }

        r = amdgpu_cs_validate(p, bo);
        if (r)
            return r;

        if (binding_userptr) {
            kvfree(lobj->user_pages);
            lobj->user_pages = NULL;
        }
        kvfree(lobj->user_pages);
        lobj->user_pages = NULL;
    }
    return 0;
}
@@ -153,6 +153,36 @@ bool amdgpu_device_is_px(struct drm_device *dev)
    return false;
}

/**
 * VRAM access helper functions.
 *
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size, sizeof(@buf) must > @size
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               uint32_t *buf, size_t size, bool write)
{
    uint64_t last;
    unsigned long flags;

    last = size - 4;
    for (last += pos; pos <= last; pos += 4) {
        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
        WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
        if (write)
            WREG32_NO_KIQ(mmMM_DATA, *buf++);
        else
            *buf++ = RREG32_NO_KIQ(mmMM_DATA);
        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
    }
}

/*
 * MMIO register access helper functions.
 */
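This helper centralizes the MM_INDEX/MM_INDEX_HI/MM_DATA paging that amdgpu_discovery_read_binary() previously open-coded (see the discovery hunk below). A minimal usage sketch, with the offset and length hypothetical:

    uint32_t buf[4];
    /* read 16 bytes starting at VRAM offset 0x1000 */
    amdgpu_device_vram_access(adev, 0x1000, buf, sizeof(buf), false);
    /* write them back */
    amdgpu_device_vram_access(adev, 0x1000, buf, sizeof(buf), true);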
@@ -2622,8 +2652,11 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
             * There is only one value specified and
             * it should apply to all non-compute jobs.
             */
            if (index == 1)
            if (index == 1) {
                adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
                if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
                    adev->compute_timeout = adev->gfx_timeout;
            }
        }

    return ret;

@@ -3168,15 +3201,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
     */
    amdgpu_bo_evict_vram(adev);

    pci_save_state(dev->pdev);
    if (suspend) {
        pci_save_state(dev->pdev);
        /* Shut down the device */
        pci_disable_device(dev->pdev);
        pci_set_power_state(dev->pdev, PCI_D3hot);
    } else {
        r = amdgpu_asic_reset(adev);
        if (r)
            DRM_ERROR("amdgpu asic reset failed\n");
    }

    return 0;
@@ -134,20 +134,10 @@ static int hw_id_map[MAX_HWIP] = {

static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
    uint32_t *p = (uint32_t *)binary;
    uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
    uint64_t pos = vram_size - BINARY_MAX_SIZE;
    unsigned long flags;

    while (pos < vram_size) {
        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
        WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
        WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
        *p++ = RREG32_NO_KIQ(mmMM_DATA);
        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        pos += 4;
    }
    uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;

    amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
    return 0;
}

@@ -179,7 +169,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
    uint16_t checksum;
    int r;

    adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL);
    adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
    if (!adev->discovery)
        return -ENOMEM;

@@ -333,7 +323,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
}

int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
                                    int *major, int *minor)
                                    int *major, int *minor, int *revision)
{
    struct binary_header *bhdr;
    struct ip_discovery_header *ihdr;

@@ -369,6 +359,8 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
                *major = ip->major;
            if (minor)
                *minor = ip->minor;
            if (revision)
                *revision = ip->revision;
            return 0;
        }
        ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
@@ -24,11 +24,13 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__

#define DISCOVERY_TMR_SIZE (64 << 10)

int amdgpu_discovery_init(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
                                    int *major, int *minor);
                                    int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);

#endif /* __AMDGPU_DISCOVERY__ */
@@ -128,6 +128,7 @@ char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
/* OverDrive(bit 14) disabled by default*/
uint amdgpu_pp_feature_mask = 0xffffbfff;
uint amdgpu_force_long_training = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;

@@ -250,9 +251,11 @@ module_param_named(msi, amdgpu_msi, int, 0444);
 * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
 * jobs is 10000. And there is no timeout enforced on compute jobs.
 */
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs."
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; "
    "for passthrough or sriov, 10000 for all jobs."
    " 0: keep default value. negative: infinity timeout), "
    "format is [Non-Compute] or [GFX,Compute,SDMA,Video]");
    "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
    "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);

/**

@@ -390,6 +393,14 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);

/**
 * DOC: forcelongtraining (uint)
 * Force long memory training in resume.
 * The default is zero, indicates short training in resume.
 */
MODULE_PARM_DESC(forcelongtraining, "force memory long training");
module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);

/**
 * DOC: pcie_gen_cap (uint)
 * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
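For illustration (hypothetical values, following the format strings above): on bare metal, amdgpu.lockup_timeout=10000 applies the 10-second timeout to non-compute jobs only, while amdgpu.lockup_timeout=10000,60000,10000,10000 sets GFX, Compute, SDMA and Video individually; under SR-IOV or passthrough the single-value form covers all job types, per the updated description.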
@@ -1154,8 +1165,13 @@ static int amdgpu_pmops_resume(struct device *dev)
static int amdgpu_pmops_freeze(struct device *dev)
{
    struct drm_device *drm_dev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = drm_dev->dev_private;
    int r;

    return amdgpu_device_suspend(drm_dev, false, true);
    r = amdgpu_device_suspend(drm_dev, false, true);
    if (r)
        return r;
    return amdgpu_asic_reset(adev);
}

static int amdgpu_pmops_thaw(struct device *dev)
@@ -67,6 +67,8 @@ struct amdgpu_nbio_funcs {
                              bool enable);
    void (*ih_doorbell_range)(struct amdgpu_device *adev,
                              bool use_doorbell, int doorbell_index);
    void (*enable_doorbell_interrupt)(struct amdgpu_device *adev,
                                      bool enable);
    void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
                                             bool enable);
    void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
@@ -517,7 +517,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        .interruptible = (bp->type != ttm_bo_type_kernel),
        .no_wait_gpu = bp->no_wait_gpu,
        .resv = bp->resv,
        .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
        .flags = bp->type != ttm_bo_type_kernel ?
            TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
    };
    struct amdgpu_bo *bo;
    unsigned long page_align, size = bp->size;
@@ -88,6 +88,17 @@ static int psp_sw_init(void *handle)
        return ret;
    }

    ret = psp_mem_training_init(psp);
    if (ret) {
        DRM_ERROR("Failed to initliaze memory training!\n");
        return ret;
    }
    ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
    if (ret) {
        DRM_ERROR("Failed to process memory training!\n");
        return ret;
    }

    return 0;
}

@@ -95,6 +106,7 @@ static int psp_sw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    psp_mem_training_fini(&adev->psp);
    release_firmware(adev->psp.sos_fw);
    adev->psp.sos_fw = NULL;
    release_firmware(adev->psp.asd_fw);

@@ -1608,6 +1620,12 @@ static int psp_resume(void *handle)

    DRM_INFO("PSP is resuming...\n");

    ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
    if (ret) {
        DRM_ERROR("Failed to process memory training!\n");
        return ret;
    }

    mutex_lock(&adev->firmware.mutex);

    ret = psp_hw_start(psp);
@@ -49,6 +49,8 @@ enum psp_bootloader_cmd {
    PSP_BL__LOAD_SYSDRV = 0x10000,
    PSP_BL__LOAD_SOSDRV = 0x20000,
    PSP_BL__LOAD_KEY_DATABASE = 0x80000,
    PSP_BL__DRAM_LONG_TRAIN = 0x100000,
    PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
};

enum psp_ring_type

@@ -111,6 +113,9 @@ struct psp_funcs
                             struct ta_ras_trigger_error_input *info);
    int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
    int (*rlc_autoload_start)(struct psp_context *psp);
    int (*mem_training_init)(struct psp_context *psp);
    void (*mem_training_fini)(struct psp_context *psp);
    int (*mem_training)(struct psp_context *psp, uint32_t ops);
};

#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64

@@ -161,6 +166,49 @@ struct psp_dtm_context {
    void *dtm_shared_buf;
};

#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000

enum psp_memory_training_init_flag {
    PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
    PSP_MEM_TRAIN_SUPPORT = 0x1,
    PSP_MEM_TRAIN_INIT_FAILED = 0x2,
    PSP_MEM_TRAIN_RESERVE_SUCCESS = 0x4,
    PSP_MEM_TRAIN_INIT_SUCCESS = 0x8,
};

enum psp_memory_training_ops {
    PSP_MEM_TRAIN_SEND_LONG_MSG = 0x1,
    PSP_MEM_TRAIN_SAVE = 0x2,
    PSP_MEM_TRAIN_RESTORE = 0x4,
    PSP_MEM_TRAIN_SEND_SHORT_MSG = 0x8,
    PSP_MEM_TRAIN_COLD_BOOT = PSP_MEM_TRAIN_SEND_LONG_MSG,
    PSP_MEM_TRAIN_RESUME = PSP_MEM_TRAIN_SEND_SHORT_MSG,
};

struct psp_memory_training_context {
    /*training data size*/
    u64 train_data_size;
    /*
     * sys_cache
     * cpu virtual address
     * system memory buffer that used to store the training data.
     */
    void *sys_cache;

    /*vram offset of the p2c training data*/
    u64 p2c_train_data_offset;
    struct amdgpu_bo *p2c_bo;

    /*vram offset of the c2p training data*/
    u64 c2p_train_data_offset;
    struct amdgpu_bo *c2p_bo;

    enum psp_memory_training_init_flag init;
    u32 training_cnt;
};

struct psp_context
{
    struct amdgpu_device *adev;

@@ -239,6 +287,7 @@ struct psp_context
    struct psp_hdcp_context hdcp_context;
    struct psp_dtm_context dtm_context;
    struct mutex mutex;
    struct psp_memory_training_context mem_train_ctx;
};

struct amdgpu_psp_funcs {

@@ -281,6 +330,12 @@ struct amdgpu_psp_funcs {
        (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
#define psp_rlc_autoload(psp) \
    ((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
#define psp_mem_training_init(psp) \
    ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
#define psp_mem_training_fini(psp) \
    ((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0)
#define psp_mem_training(psp, ops) \
    ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)

#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
@@ -71,6 +71,9 @@ const char *ras_block_string[] = {

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                      uint64_t addr);

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                       size_t size, loff_t *pos)
{

@@ -215,11 +218,12 @@ static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
 * value to the address.
 *
 * Second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 * It has four kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 * - 3: reboot on unrecoverable error
 *
 * How to use the interface?
 * programs:

@@ -228,13 +232,13 @@ static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
 *
 * .. code-block:: bash
 *
 * echo op block [error [sub_blcok address value]] > .../ras/ras_ctrl
 * echo op block [error [sub_block address value]] > .../ras/ras_ctrl
 *
 * op: disable, enable, inject
 * disable: only block is needed
 * enable: block and error are needed
 * inject: error, address, value are needed
 * block: umc, smda, gfx, .........
 * block: umc, sdma, gfx, .........
 * see ras_block_string[] for details
 * error: ue, ce
 * ue: multi_uncorrectable

@@ -290,6 +294,14 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
            break;
        }

        /* umc ce/ue error injection for a bad page is not allowed */
        if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
            amdgpu_ras_check_bad_page(adev, data.inject.address)) {
            DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
                     data.inject.address);
            break;
        }

        /* data.inject.address is offset instead of absolute gpu address */
        ret = amdgpu_ras_error_inject(adev, &data.inject);
        break;

@@ -1430,6 +1442,39 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
    return ret;
}

/*
 * check if an address belongs to bad page
 *
 * Note: this check is only for umc block
 */
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                      uint64_t addr)
{
    struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
    struct ras_err_handler_data *data;
    int i;
    bool ret = false;

    if (!con || !con->eh_data)
        return ret;

    mutex_lock(&con->recovery_lock);
    data = con->eh_data;
    if (!data)
        goto out;

    addr >>= AMDGPU_GPU_PAGE_SHIFT;
    for (i = 0; i < data->count; i++)
        if (addr == data->bps[i].retired_page) {
            ret = true;
            goto out;
        }

out:
    mutex_unlock(&con->recovery_lock);
    return ret;
}

/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{

@@ -1843,6 +1888,12 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)

void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
    uint32_t hw_supported, supported;

    amdgpu_ras_check_supported(adev, &hw_supported, &supported);
    if (!hw_supported)
        return;

    if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
        DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
@@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
             __field(unsigned int, context)
             __field(unsigned int, seqno)
             __field(struct dma_fence *, fence)
             __field(char *, ring_name)
             __string(ring, to_amdgpu_ring(job->base.sched)->name)
             __field(u32, num_ibs)
             ),

@@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl,
           __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
           __entry->context = job->base.s_fence->finished.context;
           __entry->seqno = job->base.s_fence->finished.seqno;
           __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
           __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
           __entry->num_ibs = job->num_ibs;
           ),
    TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
          __entry->sched_job_id, __get_str(timeline), __entry->context,
          __entry->seqno, __entry->ring_name, __entry->num_ibs)
          __entry->seqno, __get_str(ring), __entry->num_ibs)
);

TRACE_EVENT(amdgpu_sched_run_job,

@@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
             __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
             __field(unsigned int, context)
             __field(unsigned int, seqno)
             __field(char *, ring_name)
             __string(ring, to_amdgpu_ring(job->base.sched)->name)
             __field(u32, num_ibs)
             ),

@@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job,
           __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
           __entry->context = job->base.s_fence->finished.context;
           __entry->seqno = job->base.s_fence->finished.seqno;
           __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
           __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
           __entry->num_ibs = job->num_ibs;
           ),
    TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
          __entry->sched_job_id, __get_str(timeline), __entry->context,
          __entry->seqno, __entry->ring_name, __entry->num_ibs)
          __entry->seqno, __get_str(ring), __entry->num_ibs)
);


@@ -473,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
    TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
    TP_ARGS(sched_job, fence),
    TP_STRUCT__entry(
             __field(const char *, name)
             __string(ring, sched_job->base.sched->name);
             __field(uint64_t, id)
             __field(struct dma_fence *, fence)
             __field(uint64_t, ctx)

@@ -481,14 +481,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
             ),

    TP_fast_assign(
               __entry->name = sched_job->base.sched->name;
               __assign_str(ring, sched_job->base.sched->name)
               __entry->id = sched_job->base.id;
               __entry->fence = fence;
               __entry->ctx = fence->context;
               __entry->seqno = fence->seqno;
               ),
    TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
          __entry->name, __entry->id,
          __get_str(ring), __entry->id,
          __entry->fence, __entry->ctx,
          __entry->seqno)
);
@@ -1652,6 +1652,88 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
                                 &adev->fw_vram_usage.va);
}

/*
 * Memoy training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
    struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

    ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
    amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
    ctx->c2p_bo = NULL;

    amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
    ctx->p2c_bo = NULL;

    return 0;
}

/**
 * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from memory training.
 */
static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
{
    int ret;
    struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

    memset(ctx, 0, sizeof(*ctx));
    if (!adev->fw_vram_usage.mem_train_support) {
        DRM_DEBUG("memory training does not support!\n");
        return 0;
    }

    ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
    ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
    ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

    DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
              ctx->train_data_size,
              ctx->p2c_train_data_offset,
              ctx->c2p_train_data_offset);

    ret = amdgpu_bo_create_kernel_at(adev,
                                     ctx->p2c_train_data_offset,
                                     ctx->train_data_size,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     &ctx->p2c_bo,
                                     NULL);
    if (ret) {
        DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
        goto Err_out;
    }

    ret = amdgpu_bo_create_kernel_at(adev,
                                     ctx->c2p_train_data_offset,
                                     ctx->train_data_size,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     &ctx->c2p_bo,
                                     NULL);
    if (ret) {
        DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
        goto Err_out;
    }

    ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
    return 0;

Err_out:
    amdgpu_ttm_training_reserve_vram_fini(adev);
    return ret;
}

/**
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.

@@ -1726,6 +1808,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        return r;
    }

    /*
     *The reserved vram for memory training must be pinned to the specified
     *place on the VRAM, so reserve it early.
     */
    r = amdgpu_ttm_training_reserve_vram_init(adev);
    if (r)
        return r;

    /* allocate memory as required for VGA
     * This is used for VGA emulation and pre-OS scanout buffers to
     * avoid display artifacts while transitioning between pre-OS

@@ -1736,6 +1826,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                                NULL, &stolen_vga_buf);
    if (r)
        return r;

    /*
     * reserve one TMR (64K) memory at the top of VRAM which holds
     * IP Discovery data and is protected by PSP.
     */
    r = amdgpu_bo_create_kernel_at(adev,
                                   adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
                                   DISCOVERY_TMR_SIZE,
                                   AMDGPU_GEM_DOMAIN_VRAM,
                                   &adev->discovery_memory,
                                   NULL);
    if (r)
        return r;

    DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
             (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

@@ -1800,6 +1904,9 @@ void amdgpu_ttm_late_init(struct amdgpu_device *adev)
    void *stolen_vga_buf;
    /* return the VGA stolen memory (if any) back to VRAM */
    amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

    /* return the IP Discovery TMR memory back to VRAM */
    amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
}

/**

@@ -1811,6 +1918,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
        return;

    amdgpu_ttm_debugfs_fini(adev);
    amdgpu_ttm_training_reserve_vram_fini(adev);
    amdgpu_ttm_fw_reserve_vram_fini(adev);
    if (adev->mman.aper_base_kaddr)
        iounmap(adev->mman.aper_base_kaddr);

@@ -1907,10 +2015,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
    *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
        AMDGPU_GPU_PAGE_SIZE;

    num_dw = adev->mman.buffer_funcs->copy_num_dw;
    while (num_dw & 0x7)
        num_dw++;

    num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
    num_bytes = num_pages * 8;

    r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);

@@ -1970,11 +2075,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,

    max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
    num_loops = DIV_ROUND_UP(byte_count, max_bytes);
    num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

    /* for IB padding */
    while (num_dw & 0x7)
        num_dw++;
    num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);

    r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
    if (r)
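Both padding hunks above replace an open-coded round-up loop with the kernel's ALIGN() macro; the result is identical. A quick sketch with a hypothetical value:

    unsigned num_dw = 13;
    /* old form: while (num_dw & 0x7) num_dw++;   -> 16 */
    num_dw = ALIGN(num_dw, 8);   /* (13 + 8 - 1) & ~7 == 16 */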
@@ -80,6 +80,11 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                     struct amdgpu_bo *bo,
                                     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_init - allocate memory, load vce firmware

@@ -428,14 +433,15 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                     struct amdgpu_bo *bo,
                                     struct dma_fence **fence)
{
    const unsigned ib_size_dw = 1024;
    struct amdgpu_job *job;
    struct amdgpu_ib *ib;
    struct dma_fence *f = NULL;
    uint64_t dummy;
    uint64_t addr;
    int i, r;

    r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);

@@ -444,7 +450,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,

    ib = &job->ibs[0];

    dummy = ib->gpu_addr + 1024;
    addr = amdgpu_bo_gpu_offset(bo);

    /* stitch together an VCE create msg */
    ib->length_dw = 0;

@@ -476,8 +482,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,

    ib->ptr[ib->length_dw++] = 0x00000014; /* len */
    ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
    ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
    ib->ptr[ib->length_dw++] = dummy;
    ib->ptr[ib->length_dw++] = upper_32_bits(addr);
    ib->ptr[ib->length_dw++] = addr;
    ib->ptr[ib->length_dw++] = 0x00000001;

    for (i = ib->length_dw; i < ib_size_dw; ++i)

@@ -507,8 +513,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               bool direct, struct dma_fence **fence)
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                      bool direct, struct dma_fence **fence)
{
    const unsigned ib_size_dw = 1024;
    struct amdgpu_job *job;

@@ -1110,13 +1116,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
    struct dma_fence *fence = NULL;
    struct amdgpu_bo *bo = NULL;
    long r;

    /* skip vce ring1/2 ib test for now, since it's not reliable */
    if (ring != &ring->adev->vce.ring[0])
        return 0;

    r = amdgpu_vce_get_create_msg(ring, 1, NULL);
    r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM,
                                  &bo, NULL, NULL);
    if (r)
        return r;

    r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
    if (r)
        goto error;

@@ -1132,5 +1145,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)

error:
    dma_fence_put(fence);
    amdgpu_bo_unreserve(bo);
    amdgpu_bo_unref(&bo);
    return r;
}
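The IB test now feeds the create/destroy messages a real, pinned VRAM buffer instead of an address just past the IB. A condensed sketch of the pattern introduced above (error handling trimmed, sizes per the diff):

    struct amdgpu_bo *bo = NULL;
    long r;

    r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL, NULL);
    if (r)
        return r;
    r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);  /* writes at amdgpu_bo_gpu_offset(bo) */
    /* ... run the test, then release the buffer ... */
    amdgpu_bo_unreserve(bo);
    amdgpu_bo_unref(&bo);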
@@ -58,10 +58,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
@@ -569,13 +569,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                         struct dma_fence **fence)
                                         struct amdgpu_bo *bo,
                                         struct dma_fence **fence)
{
    const unsigned ib_size_dw = 16;
    struct amdgpu_job *job;
    struct amdgpu_ib *ib;
    struct dma_fence *f = NULL;
    uint64_t dummy;
    uint64_t addr;
    int i, r;

    r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);

@@ -583,14 +584,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        return r;

    ib = &job->ibs[0];
    dummy = ib->gpu_addr + 1024;
    addr = amdgpu_bo_gpu_offset(bo);

    ib->length_dw = 0;
    ib->ptr[ib->length_dw++] = 0x00000018;
    ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
    ib->ptr[ib->length_dw++] = handle;
    ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
    ib->ptr[ib->length_dw++] = dummy;
    ib->ptr[ib->length_dw++] = upper_32_bits(addr);
    ib->ptr[ib->length_dw++] = addr;
    ib->ptr[ib->length_dw++] = 0x0000000b;

    ib->ptr[ib->length_dw++] = 0x00000014;

@@ -621,13 +622,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                          struct dma_fence **fence)
                                          struct amdgpu_bo *bo,
                                          struct dma_fence **fence)
{
    const unsigned ib_size_dw = 16;
    struct amdgpu_job *job;
    struct amdgpu_ib *ib;
    struct dma_fence *f = NULL;
    uint64_t dummy;
    uint64_t addr;
    int i, r;

    r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);

@@ -635,14 +637,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        return r;

    ib = &job->ibs[0];
    dummy = ib->gpu_addr + 1024;
    addr = amdgpu_bo_gpu_offset(bo);

    ib->length_dw = 0;
    ib->ptr[ib->length_dw++] = 0x00000018;
    ib->ptr[ib->length_dw++] = 0x00000001;
    ib->ptr[ib->length_dw++] = handle;
    ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
    ib->ptr[ib->length_dw++] = dummy;
    ib->ptr[ib->length_dw++] = upper_32_bits(addr);
    ib->ptr[ib->length_dw++] = addr;
    ib->ptr[ib->length_dw++] = 0x0000000b;

    ib->ptr[ib->length_dw++] = 0x00000014;

@@ -675,13 +677,20 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
    struct dma_fence *fence = NULL;
    struct amdgpu_bo *bo = NULL;
    long r;

    r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
    r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM,
                                  &bo, NULL, NULL);
    if (r)
        return r;

    r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
    if (r)
        goto error;

    r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
    r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
    if (r)
        goto error;

@@ -693,6 +702,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)

error:
    dma_fence_put(fence);
    amdgpu_bo_unreserve(bo);
    amdgpu_bo_unref(&bo);
    return r;
}
@@ -2971,6 +2971,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        vm->pasid = 0;
    }

    list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
        if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
            amdgpu_vm_prt_fini(adev, vm);
            prt_fini_needed = false;
        }

        list_del(&mapping->list);
        amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
    }

    amdgpu_vm_free_pts(adev, vm, NULL);
    amdgpu_bo_unreserve(root);
    amdgpu_bo_unref(&root);

@@ -2990,15 +3000,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        list_del(&mapping->list);
        kfree(mapping);
    }
    list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
        if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
            amdgpu_vm_prt_fini(adev, vm);
            prt_fini_needed = false;
        }

        list_del(&mapping->list);
        amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
    }

    dma_fence_put(vm->last_update);
    for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
@@ -1270,15 +1270,15 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
}

/**
 * cik_asic_reset - soft reset GPU
 * cik_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int cik_asic_reset(struct amdgpu_device *adev)
static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
{
    int r;

@@ -1294,7 +1294,45 @@ static int cik_asic_reset(struct amdgpu_device *adev)
static enum amd_reset_method
cik_asic_reset_method(struct amdgpu_device *adev)
{
    return AMD_RESET_METHOD_LEGACY;
    bool baco_reset;

    switch (adev->asic_type) {
    case CHIP_BONAIRE:
    case CHIP_HAWAII:
        /* disable baco reset until it works */
        /* smu7_asic_get_baco_capability(adev, &baco_reset); */
        baco_reset = false;
        break;
    default:
        baco_reset = false;
        break;
    }

    if (baco_reset)
        return AMD_RESET_METHOD_BACO;
    else
        return AMD_RESET_METHOD_LEGACY;
}

/**
 * cik_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int cik_asic_reset(struct amdgpu_device *adev)
{
    int r;

    if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
        r = smu7_asic_baco_reset(adev);
    else
        r = cik_asic_pci_config_reset(adev);

    return r;
}

static u32 cik_get_config_memsize(struct amdgpu_device *adev)
@@ -31,4 +31,7 @@ void cik_srbm_select(struct amdgpu_device *adev,
int cik_set_ip_blocks(struct amdgpu_device *adev);

void legacy_doorbell_index_init(struct amdgpu_device *adev);
int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
int smu7_asic_baco_reset(struct amdgpu_device *adev);

#endif
@@ -5283,15 +5283,12 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)

static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
{
    /* init asic gds info */
    switch (adev->asic_type) {
    case CHIP_NAVI10:
    default:
        adev->gds.gds_size = 0x10000;
        adev->gds.gds_compute_max_wave_id = 0x4ff;
        break;
    }
    unsigned total_cu = adev->gfx.config.max_cu_per_sh *
                        adev->gfx.config.max_sh_per_se *
                        adev->gfx.config.max_shader_engines;

    adev->gds.gds_size = 0x10000;
    adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
    adev->gds.gws_size = 64;
    adev->gds.oa_size = 16;
}
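The new GDS setup derives the maximum compute wave id from the CU count rather than hardcoding 0x4ff. Worked through with hypothetical config values:

    /* max_cu_per_sh = 10, max_sh_per_se = 2, max_shader_engines = 2 (hypothetical) */
    unsigned total_cu = 10 * 2 * 2;                          /* 40 CUs */
    adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;   /* 1279, i.e. 32 wave slots per CU */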
(File diff suppressed because it is too large.)
@@ -1137,13 +1137,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
    int r, i;
    bool value;
    u32 tmp;

    amdgpu_device_program_register_sequence(adev,
                                            golden_settings_vega10_hdp,
                                            ARRAY_SIZE(golden_settings_vega10_hdp));
    int r;

    if (adev->gart.bo == NULL) {
        dev_err(adev->dev, "No VRAM object for PCIE GART.\n");

@@ -1153,15 +1147,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
    if (r)
        return r;

    switch (adev->asic_type) {
    case CHIP_RAVEN:
        /* TODO for renoir */
        mmhub_v1_0_update_power_gating(adev, true);
        break;
    default:
        break;
    }

    r = gfxhub_v1_0_gart_enable(adev);
    if (r)
        return r;

@@ -1173,6 +1158,49 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
    if (r)
        return r;

    DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
             (unsigned)(adev->gmc.gart_size >> 20),
             (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
    adev->gart.ready = true;
    return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    bool value;
    int r, i;
    u32 tmp;

    /* The sequence of these two function calls matters.*/
    gmc_v9_0_init_golden_registers(adev);

    if (adev->mode_info.num_crtc) {
        if (adev->asic_type != CHIP_ARCTURUS) {
            /* Lockout access through VGA aperture*/
            WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

            /* disable VGA render */
            WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
        }
    }

    amdgpu_device_program_register_sequence(adev,
                                            golden_settings_vega10_hdp,
                                            ARRAY_SIZE(golden_settings_vega10_hdp));

    switch (adev->asic_type) {
    case CHIP_RAVEN:
        /* TODO for renoir */
        mmhub_v1_0_update_power_gating(adev, true);
        break;
    case CHIP_ARCTURUS:
        WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
        break;
    default:
        break;
    }

    WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

    tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);

@@ -1201,31 +1229,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
    if (adev->umc.funcs && adev->umc.funcs->init_registers)
        adev->umc.funcs->init_registers(adev);

    DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
             (unsigned)(adev->gmc.gart_size >> 20),
             (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
    adev->gart.ready = true;
    return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
    int r;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    /* The sequence of these two function calls matters.*/
    gmc_v9_0_init_golden_registers(adev);

    if (adev->mode_info.num_crtc) {
        if (adev->asic_type != CHIP_ARCTURUS) {
            /* Lockout access through VGA aperture*/
            WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

            /* disable VGA render */
            WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
        }
    }

    r = gmc_v9_0_gart_enable(adev);

    return r;
@@ -502,6 +502,13 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
    }
}

static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
                                                bool enable)
{
    WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
                   DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}

const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
    .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
    .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,

@@ -516,6 +523,7 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
    .enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
    .enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
    .ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
    .enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
    .update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
    .update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
    .get_clockgating_state = nbio_v7_4_get_clockgating_state,
@@ -40,6 +40,9 @@
MODULE_FIRMWARE("amdgpu/raven_asd.bin");
MODULE_FIRMWARE("amdgpu/picasso_asd.bin");
MODULE_FIRMWARE("amdgpu/raven2_asd.bin");
MODULE_FIRMWARE("amdgpu/picasso_ta.bin");
MODULE_FIRMWARE("amdgpu/raven2_ta.bin");
MODULE_FIRMWARE("amdgpu/raven_ta.bin");

static int psp_v10_0_init_microcode(struct psp_context *psp)
{
@ -58,6 +58,8 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
|
||||
#define mmRLC_GPM_UCODE_DATA_NV10 0x5b62
|
||||
#define mmSDMA0_UCODE_ADDR_NV10 0x5880
|
||||
#define mmSDMA0_UCODE_DATA_NV10 0x5881
|
||||
/* memory training timeout define */
|
||||
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
|
||||
|
||||
static int psp_v11_0_init_microcode(struct psp_context *psp)
|
||||
{
|
||||
@ -206,18 +208,26 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
|
||||
return err;
|
||||
}
|
||||
|
||||
static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
|
||||
{
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
uint32_t sol_reg;
|
||||
|
||||
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
|
||||
|
||||
return sol_reg != 0x0;
|
||||
}
|
||||
|
||||
static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
uint32_t psp_gfxdrv_command_reg = 0;
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
uint32_t sol_reg;
|
||||
|
||||
/* Check tOS sign of life register to confirm sys driver and sOS
|
||||
* are already been loaded.
|
||||
*/
|
||||
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
|
||||
if (sol_reg) {
|
||||
if (psp_v11_0_is_sos_alive(psp)) {
|
||||
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
|
||||
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
|
||||
return 0;
|
||||
@ -253,13 +263,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
|
||||
int ret;
|
||||
uint32_t psp_gfxdrv_command_reg = 0;
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
uint32_t sol_reg;
|
||||
|
||||
/* Check sOS sign of life register to confirm sys driver and sOS
|
||||
* are already been loaded.
|
||||
*/
|
||||
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
|
||||
if (sol_reg) {
|
||||
if (psp_v11_0_is_sos_alive(psp)) {
|
||||
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
|
||||
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
|
||||
return 0;
|
||||
@ -297,13 +305,11 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
|
||||
int ret;
|
||||
unsigned int psp_gfxdrv_command_reg = 0;
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
uint32_t sol_reg;
|
||||
|
||||
/* Check sOS sign of life register to confirm sys driver and sOS
|
||||
* are already been loaded.
|
||||
*/
|
||||
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
|
||||
if (sol_reg)
|
||||
if (psp_v11_0_is_sos_alive(psp))
|
||||
return 0;
|
||||
|
||||
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
|
||||
@ -898,6 +904,162 @@ static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
	return psp_rlc_autoload_start(psp);
}

static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
{
	int ret;
	int i;
	uint32_t data_32;
	int max_wait;
	struct amdgpu_device *adev = psp->adev;

	data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg);

	max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
	for (i = 0; i < max_wait; i++) {
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
				   0x80000000, 0x80000000, false);
		if (ret == 0)
			break;
	}
	if (i < max_wait)
		ret = 0;
	else
		ret = -ETIME;

	DRM_DEBUG("training %s %s, cost %d @ %d ms\n",
		  (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
		  (ret == 0) ? "succeed" : "failed",
		  i, adev->usec_timeout/1000);
	return ret;
}
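
/*
 * Illustrative sketch: the loop above splits the 3 s
 * MEM_TRAIN_SEND_MSG_TIMEOUT_US budget into bounded psp_wait_for()
 * attempts. Assuming the common default of adev->usec_timeout == 100000
 * (100 ms; the value is driver-configurable), max_wait works out to
 * 3000000 / 100000 = 30 polls of bit 31 of C2PMSG_35.
 */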
static void psp_v11_0_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

static int psp_v11_0_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		DRM_DEBUG("memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		  ctx->train_data_size,
		  ctx->p2c_train_data_offset,
		  ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_v11_0_memory_training_fini(psp);
	return ret;
}

/*
 * save and restore process
 */
static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
{
	int ret;
	uint32_t p2c_header[4];
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
	uint32_t *pcache = (uint32_t*)ctx->sys_cache;

	if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
		DRM_DEBUG("Memory training is not supported.\n");
		return 0;
	} else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
		DRM_ERROR("Memory training initialization failure.\n");
		return -EINVAL;
	}

	if (psp_v11_0_is_sos_alive(psp)) {
		DRM_DEBUG("SOS is alive, skip memory training.\n");
		return 0;
	}

	amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
	DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
		  pcache[0], pcache[1], pcache[2], pcache[3],
		  p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);

	if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
		DRM_DEBUG("Short training depends on restore.\n");
		ops |= PSP_MEM_TRAIN_RESTORE;
	}

	if ((ops & PSP_MEM_TRAIN_RESTORE) &&
	    pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
		DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n");
		ops |= PSP_MEM_TRAIN_SAVE;
	}

	if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
	    !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
	      pcache[3] == p2c_header[3])) {
		DRM_DEBUG("sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
		ops |= PSP_MEM_TRAIN_SAVE;
	}

	if ((ops & PSP_MEM_TRAIN_SAVE) &&
	    p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
		DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n");
		ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
	}

	if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
		ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
		ops |= PSP_MEM_TRAIN_SAVE;
	}

	DRM_DEBUG("Memory training ops:%x.\n", ops);

	if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
		ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
		if (ret) {
			DRM_ERROR("Send long training msg failed.\n");
			return ret;
		}
	}

	if (ops & PSP_MEM_TRAIN_SAVE) {
		amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
	}

	if (ops & PSP_MEM_TRAIN_RESTORE) {
		amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
	}

	if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
		ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
							 PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
		if (ret) {
			DRM_ERROR("send training msg failed.\n");
			return ret;
		}
	}
	ctx->training_cnt++;
	return 0;
}
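
/*
 * Illustrative sketch (derived from the flag logic above): the ops bits
 * resolve in dependency order. Requesting only PSP_MEM_TRAIN_SEND_SHORT_MSG
 * while both sys_cache and the p2c buffer lack a valid signature expands as:
 *
 *   ops  = PSP_MEM_TRAIN_SEND_SHORT_MSG;
 *   ops |= PSP_MEM_TRAIN_RESTORE;         (short training needs restored data)
 *   ops |= PSP_MEM_TRAIN_SAVE;            (sys_cache has no valid signature)
 *   ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;   (p2c buffer has no valid signature)
 *   ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
 *
 * leaving LONG_TRAIN + SAVE + RESTORE: a full retrain whose results are
 * cached to system memory and written back to VRAM.
 */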
static const struct psp_funcs psp_v11_0_funcs = {
	.init_microcode = psp_v11_0_init_microcode,
	.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@ -918,6 +1080,9 @@ static const struct psp_funcs psp_v11_0_funcs = {
	.ras_trigger_error = psp_v11_0_ras_trigger_error,
	.ras_cure_posion = psp_v11_0_ras_cure_posion,
	.rlc_autoload_start = psp_v11_0_rlc_autoload_start,
	.mem_training_init = psp_v11_0_memory_training_init,
	.mem_training_fini = psp_v11_0_memory_training_fini,
	.mem_training = psp_v11_0_memory_training,
};

void psp_v11_0_set_psp_funcs(struct psp_context *psp)

@ -1792,7 +1792,7 @@ static int sdma_v4_0_hw_init(void *handle)

	if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->set_powergating_by_smu) ||
	    adev->asic_type == CHIP_RENOIR)
	    (adev->asic_type == CHIP_RENOIR && !adev->in_gpu_reset))
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);

	if (!amdgpu_sriov_vf(adev))

@ -478,36 +478,58 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)

static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = &adev->smu;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
		*cap = false;
		return -ENOENT;
		*cap = smu_baco_is_support(smu);
		return 0;
	} else {
		void *pp_handle = adev->powerplay.pp_handle;
		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

		if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
			*cap = false;
			return -ENOENT;
		}

		return pp_funcs->get_asic_baco_capability(pp_handle, cap);
	}

	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
}

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
		return -EIO;

	/* exit BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
		return -EIO;
	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	dev_info(adev->dev, "GPU BACO reset\n");

	adev->in_baco_reset = 1;
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = &adev->smu;

		if (smu_baco_reset(smu))
			return -EIO;
	} else {
		void *pp_handle = adev->powerplay.pp_handle;
		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

		if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		if (pp_funcs->set_asic_baco_state(pp_handle, 1))
			return -EIO;

		/* exit BACO state */
		if (pp_funcs->set_asic_baco_state(pp_handle, 0))
			return -EIO;
	}

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}
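
/*
 * Illustrative sketch: whichever branch performs the reset, the sequence is
 * bracketed the same way when RAS is supported:
 *
 *   enable_doorbell_interrupt(adev, false);
 *   enter BACO -> exit BACO   (sw-SMU or powerplay path)
 *   enable_doorbell_interrupt(adev, true);
 *
 * so a RAS recovery racing the reset cannot leave NBIF waiting on a doorbell
 * while the device sits in BACO.
 */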
@ -67,6 +67,8 @@ struct soc15_allowed_register_entry {
#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
	{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }

#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT

void soc15_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid);
int soc15_set_ip_blocks(struct amdgpu_device *adev);

@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
	return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@ -268,13 +269,14 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
	return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@ -327,13 +329,20 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
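
/*
 * Illustrative sketch: the change above replaces the IB-relative scratch
 * address ("dummy") with a caller-provided buffer object, so the encoder
 * session messages land in a real 128 KiB VRAM BO. The IB test allocates
 * that BO once, reuses it for both the create and destroy messages, and
 * releases it on the error path via amdgpu_bo_unreserve()/amdgpu_bo_unref().
 */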
@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
	return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@ -275,13 +276,14 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					struct dma_fence **fence)
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
	return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
@ -334,13 +336,20 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

@ -25,6 +25,7 @@

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
@ -709,6 +710,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
@ -939,6 +943,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

@ -689,16 +689,50 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
	return -EINVAL;
}

int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
		*cap = false;
		return -ENOENT;
	}

	return pp_funcs->get_asic_baco_capability(pp_handle, cap);
}

int smu7_asic_baco_reset(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 1))
		return -EIO;

	/* exit BACO state */
	if (pp_funcs->set_asic_baco_state(pp_handle, 0))
		return -EIO;

	dev_info(adev->dev, "GPU BACO reset\n");

	return 0;
}

/**
 * vi_asic_reset - soft reset GPU
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	int r;

@ -714,7 +748,47 @@ static int vi_asic_reset(struct amdgpu_device *adev)
static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	return AMD_RESET_METHOD_LEGACY;
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		smu7_asic_get_baco_capability(adev, &baco_reset);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
		r = smu7_asic_baco_reset(adev);
	else
		r = vi_asic_pci_config_reset(adev);

	return r;
}
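
/*
 * Illustrative sketch (hypothetical values): reset is now a two-step
 * dispatch. On, say, POLARIS10 with BACO reported as available:
 *
 *   vi_asic_reset_method(adev)  ->  AMD_RESET_METHOD_BACO
 *   vi_asic_reset(adev)         ->  smu7_asic_baco_reset(adev)
 *
 * while ASICs whose capability query fails or is absent fall back to
 * vi_asic_pci_config_reset().
 */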

static u32 vi_get_config_memsize(struct amdgpu_device *adev)

@ -31,4 +31,7 @@ void vi_srbm_select(struct amdgpu_device *adev,
int vi_set_ip_blocks(struct amdgpu_device *adev);

void legacy_doorbell_index_init(struct amdgpu_device *adev);
int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
int smu7_asic_baco_reset(struct amdgpu_device *adev);

#endif

@ -3396,7 +3396,8 @@ static void fill_stream_properties_from_drm_display_mode(
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	memset(timing_out, 0, sizeof(struct dc_crtc_timing));
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
@ -3433,6 +3434,13 @@ static void fill_stream_properties_from_drm_display_mode(
		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
@ -3653,6 +3661,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
@ -3727,6 +3738,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);

finish:
	dc_sink_release(sink);

@ -122,11 +122,16 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
	}

	/* Configure dithering */
	if (!dm_need_crc_dither(source))
	if (!dm_need_crc_dither(source)) {
		dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
	else
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_DISABLE);
	} else {
		dc_stream_set_dither_option(stream_state,
					    DITHER_OPTION_DEFAULT);
		dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
					    DYN_EXPANSION_AUTO);
	}

unlock:
	mutex_unlock(&adev->dm.dc_lock);

@ -589,10 +589,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
				&wm_with_clock_ranges);
	else if (adev->smu.funcs &&
		 adev->smu.funcs->set_watermarks_for_clock_ranges)
	else
		smu_set_watermarks_for_clock_ranges(&adev->smu,
				&wm_with_clock_ranges);
				&wm_with_clock_ranges);
}

void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@ -665,7 +664,6 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
			wm_with_clock_ranges.wm_dmif_clocks_ranges;
@ -708,15 +706,7 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL;
	 * 1: fail
	 */
	if (smu_set_watermarks_for_clock_ranges(&adev->smu,
			&wm_with_clock_ranges))
		return PP_SMU_RESULT_UNSUPPORTED;
	smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);

	return PP_SMU_RESULT_OK;
}
@ -901,6 +891,90 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
	return PP_SMU_RESULT_FAIL;
}

#ifdef CONFIG_DRM_AMD_DC_DCN2_1
enum pp_smu_status pp_rn_get_dpm_clock_table(
		struct pp_smu *pp, struct dpm_clocks *clock_table)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu->ppt_funcs->get_dpm_clock_table)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu->ppt_funcs->get_dpm_clock_table(smu, clock_table))
		return PP_SMU_RESULT_OK;

	return PP_SMU_RESULT_FAIL;
}

enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
			wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
			wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;

		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
			ranges->reader_wm_sets[i].min_drain_clk_mhz;

		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
			ranges->reader_wm_sets[i].max_drain_clk_mhz;

		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
			ranges->reader_wm_sets[i].min_fill_clk_mhz;

		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
			ranges->reader_wm_sets[i].max_fill_clk_mhz;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
			ranges->writer_wm_sets[i].min_fill_clk_mhz;

		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
			ranges->writer_wm_sets[i].max_fill_clk_mhz;

		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
			ranges->writer_wm_sets[i].min_drain_clk_mhz;

		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
			ranges->writer_wm_sets[i].max_drain_clk_mhz;
	}

	smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);

	return PP_SMU_RESULT_OK;
}
#endif
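
/*
 * Illustrative sketch: in both watermark helpers above, a wm_inst greater
 * than 3 is clamped to WM_SET_A, so a range set passed in with wm_inst = 5
 * programs the same watermark slot as WM_SET_A; only instances 0..3 select
 * distinct sets.
 */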
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
@ -945,6 +1019,15 @@ void dm_pp_get_funcs(
		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
		break;
#endif

#ifdef CONFIG_DRM_AMD_DC_DCN2_1
	case DCN_VERSION_2_1:
		funcs->ctx.ver = PP_SMU_VER_RN;
		funcs->rn_funcs.pp_smu.dm = ctx;
		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
		break;
#endif
	default:
		DRM_ERROR("smu version is not supported !\n");
		break;

@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info(

	/* Sort voltage table from low to high*/
	if (result == BP_RESULT_OK) {
		struct clock_voltage_caps temp = {0, 0};
		uint32_t i;
		uint32_t j;

@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info(
				info->disp_clk_voltage[j].max_supported_clk <
				info->disp_clk_voltage[j-1].max_supported_clk) {
					/* swap j and j - 1*/
					temp = info->disp_clk_voltage[j-1];
					info->disp_clk_voltage[j-1] =
							info->disp_clk_voltage[j];
					info->disp_clk_voltage[j] = temp;
					swap(info->disp_clk_voltage[j - 1],
					     info->disp_clk_voltage[j]);
				}
			}
		}

@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info(

	struct atom_common_table_header *header;
	struct atom_data_revision revision;

	struct clock_voltage_caps temp = {0, 0};
	uint32_t i;
	uint32_t j;

@ -1644,10 +1642,8 @@ static enum bp_result construct_integrated_info(
				info->disp_clk_voltage[j-1].max_supported_clk
				) {
					/* swap j and j - 1*/
					temp = info->disp_clk_voltage[j-1];
					info->disp_clk_voltage[j-1] =
							info->disp_clk_voltage[j];
					info->disp_clk_voltage[j] = temp;
					swap(info->disp_clk_voltage[j - 1],
					     info->disp_clk_voltage[j]);
				}
			}
		}

@ -65,6 +65,31 @@ int clk_mgr_helper_get_active_display_cnt(
	return display_count;
}

void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
	struct dc_link *edp_link = get_edp_link(dc);

	if (dc->hwss.exit_optimized_pwr_state)
		dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);

	if (edp_link) {
		clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
		dc_link_set_psr_allow_active(edp_link, false, false);
	}

}

void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
	struct dc_link *edp_link = get_edp_link(dc);

	if (edp_link)
		dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false);

	if (dc->hwss.optimize_pwr_state)
		dc->hwss.optimize_pwr_state(dc, dc->current_state);

}

struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
{

@ -349,12 +349,36 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
	}
}

static bool dcn2_are_clock_states_equal(struct dc_clocks *a,
		struct dc_clocks *b)
{
	if (a->dispclk_khz != b->dispclk_khz)
		return false;
	else if (a->dppclk_khz != b->dppclk_khz)
		return false;
	else if (a->dcfclk_khz != b->dcfclk_khz)
		return false;
	else if (a->socclk_khz != b->socclk_khz)
		return false;
	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
		return false;
	else if (a->phyclk_khz != b->phyclk_khz)
		return false;
	else if (a->dramclk_khz != b->dramclk_khz)
		return false;
	else if (a->p_state_change_support != b->p_state_change_support)
		return false;

	return true;
}

static struct clk_mgr_funcs dcn2_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dcn2_update_clocks,
	.init_clocks = dcn2_init_clocks,
	.enable_pme_wa = dcn2_enable_pme_wa,
	.get_clock = dcn2_get_clock,
	.are_clock_states_equal = dcn2_are_clock_states_equal,
};
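
/*
 * Illustrative sketch: dcn2_are_clock_states_equal() gives callers an
 * explicit field-by-field comparison, so code that previously memcmp()'d
 * two struct dc_clocks (padding included) can instead ask the clk_mgr:
 *
 *   if (clk_mgr->funcs->are_clock_states_equal &&
 *       !clk_mgr->funcs->are_clock_states_equal(&a, &b))
 *           optimization_required = true;
 */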
@ -50,4 +50,5 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
		enum dc_clock_type clock_type,
		struct dc_clock_config *clock_cfg);

void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
#endif //__DCN20_CLK_MGR_H__

@ -52,6 +52,45 @@
#define REG(reg_name) \
	(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)


/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
int rn_get_active_display_cnt_wa(
		struct dc *dc,
		struct dc_state *context)
{
	int i, display_count;
	bool hdmi_present = false;

	display_count = 0;
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
			hdmi_present = true;
	}

	for (i = 0; i < dc->link_count; i++) {
		const struct dc_link *link = dc->links[i];

		/*
		 * Only notify active stream or virtual stream.
		 * Need to notify virtual stream to work around
		 * headless case. HPD does not fire when system is in
		 * S0i2.
		 */
		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
		if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
				link->link_enc->funcs->is_dig_enabled(link->link_enc))
			display_count++;
	}

	/* WA for hang on HDMI after display is turned off, then back on */
	if (display_count == 0 && hdmi_present)
		display_count = 1;

	return display_count;
}

void rn_update_clocks(struct clk_mgr *clk_mgr_base,
		struct dc_state *context,
		bool safe_to_lower)
@ -62,17 +101,36 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool enter_display_off = false;
	bool dpp_clock_lowered = false;

	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;

	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
	if (dc->work_arounds.skip_clock_update)
		return;

	if (display_count == 0)
		enter_display_off = true;
	/*
	 * If it is safe to lower but we are already in the lower state, we
	 * don't have to do anything; if safe to lower is false, we just go
	 * to the higher state.
	 */
	if (safe_to_lower) {
		/* check that we're not already in lower */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_OPTIMIZED) {

	if (enter_display_off == safe_to_lower) {
		rn_vbios_smu_set_display_count(clk_mgr, display_count);
			display_count = rn_get_active_display_cnt_wa(dc, context);
			/* if we can go lower, go lower */
			if (display_count == 0) {
				rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_OPTIMIZED);
				/* update power state */
				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_OPTIMIZED;
			}
		}
	} else {
		/* check that we're not already in the normal state */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_NORMAL) {
			rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_NORMAL);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_NORMAL;
		}
	}
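
	/*
	 * Illustrative decision table for the branch above:
	 *
	 *   safe_to_lower  displays  pwr_state   ->  action
	 *   true           0         NORMAL          enter OPTIMIZED
	 *   true           0         OPTIMIZED       none (already low)
	 *   true           >0        any             none
	 *   false          any       OPTIMIZED       return to NORMAL
	 *   false          any       NORMAL          none
	 */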
	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {

@ -319,7 +377,7 @@ void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)

	rn_dump_clk_registers(&sb, clk_mgr_base, &log_info);

	s->dprefclk_khz = sb.dprefclk;
	s->dprefclk_khz = sb.dprefclk * 1000;
}

void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@ -329,10 +387,19 @@ void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
	rn_vbios_smu_enable_pme_wa(clk_mgr);
}

void rn_init_clocks(struct clk_mgr *clk_mgr)
{
	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
	// Assumption is that boot state always supports pstate
	clk_mgr->clks.p_state_change_support = true;
	clk_mgr->clks.prev_p_state_change_support = true;
	clk_mgr->clks.pwr_state = DCN_PWR_STATE_NORMAL;
}

static struct clk_mgr_funcs dcn21_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = rn_update_clocks,
	.init_clocks = dcn2_init_clocks,
	.init_clocks = rn_init_clocks,
	.enable_pme_wa = rn_enable_pme_wa,
	/* .dump_clk_registers = rn_dump_clk_registers */
};
@ -405,7 +472,7 @@ struct clk_bw_params rn_bw_params = {
	}
};

void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
void rn_build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
{
	int i, num_valid_sets;

@ -462,23 +529,50 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra

}

void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
	int i;

	for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
		if (clock_table->DcfClocks[i].Vol == voltage)
			return clock_table->DcfClocks[i].Freq;
	}

	ASSERT(0);
	return 0;
}

void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
{
	int i, j = 0;

	j = -1;

	ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);

	for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) {
		if (clock_table->FClocks[i].Freq == 0)
			break;
	/* Find lowest DPM, FCLK is filled in reverse order*/

		bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq;
		bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq;
		bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq;
		bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol;
	for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
		if (clock_table->FClocks[i].Freq != 0) {
			j = i;
			break;
		}
	}

	if (j == -1) {
		/* clock table is all 0s, just use our own hardcode */
		ASSERT(0);
		return;
	}

	bw_params->clk_table.num_entries = j + 1;

	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
		bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
		bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
		bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
	}
	bw_params->clk_table.num_entries = i;
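
	/*
	 * Illustrative worked example (values invented): if the SMU reports
	 * FClocks = {1600, 1200, 800, 400, 0, ...} MHz, highest DPM first,
	 * the downward scan finds j = 3, so num_entries = 4 and the fill
	 * loop walks j = 3..0, producing entries[0..3].fclk_mhz of
	 * 400, 800, 1200, 1600, lowest DPM level first, with each
	 * dcfclk_mhz looked up by matching voltage in
	 * find_dcfclk_for_voltage().
	 */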

	bw_params->vram_type = asic_id->vram_type;
	bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH;
@ -486,7 +580,7 @@ void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct d
	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;

		if (clock_table->FClocks[i].Freq == 0) {
		if (i >= bw_params->clk_table.num_entries) {
			bw_params->wm_table.entries[i].valid = false;
			continue;
		}
@ -547,25 +641,24 @@ void rn_clk_mgr_construct(
		clk_mgr->dentist_vco_freq_khz = 3600000;

		rn_dump_clk_registers(&s, &clk_mgr->base, &log_info);
		clk_mgr->base.dprefclk_khz = s.dprefclk;

		if (clk_mgr->base.dprefclk_khz != 600000) {
			clk_mgr->base.dprefclk_khz = 600000;
			ASSERT(1); //TODO: Renoir follow up.
		}
		/* Convert dprefclk units from MHz to KHz */
		/* Value already divided by 10, some resolution lost */
		clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;

		/* in case we don't get a value from the register, use default */
		if (clk_mgr->base.dprefclk_khz == 0)
		if (clk_mgr->base.dprefclk_khz == 0) {
			ASSERT(clk_mgr->base.dprefclk_khz == 600000);
			clk_mgr->base.dprefclk_khz = 600000;
		}
	}

	dce_clock_read_ss_info(clk_mgr);

	clk_mgr->base.bw_params = &rn_bw_params;

	if (pp_smu) {
	if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
		pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
		clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
		rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
	}

	/*
@ -576,15 +669,16 @@ void rn_clk_mgr_construct(
	if (!debug->disable_pplib_wm_range) {
		struct pp_smu_wm_range_sets ranges = {0};

		build_watermark_ranges(clk_mgr->base.bw_params, &ranges);
		rn_build_watermark_ranges(clk_mgr->base.bw_params, &ranges);

		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
		if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
			pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
	}

	/* enable powerfeatures when displaycount goes to 0 */
	if (!debug->disable_48mhz_pwrdwn)
		rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr);
	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) {
		/* enable powerfeatures when displaycount goes to 0 */
		rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
	}
}

@ -26,11 +26,20 @@
#ifndef __RN_CLK_MGR_H__
#define __RN_CLK_MGR_H__

#include "clk_mgr.h"
#include "dm_pp_smu.h"

struct rn_clk_registers {
	uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */
};


void rn_build_watermark_ranges(
		struct clk_bw_params *bw_params,
		struct pp_smu_wm_range_sets *ranges);
void rn_clk_mgr_helper_populate_bw_params(
		struct clk_bw_params *bw_params,
		struct dpm_clocks *clock_table,
		struct hw_asic_id *asic_id);
void rn_clk_mgr_construct(struct dc_context *ctx,
		struct clk_mgr_internal *clk_mgr,
		struct pp_smu_funcs *pp_smu,

@ -33,7 +33,7 @@
#include "mp/mp_12_0_0_sh_mask.h"

#define REG(reg_name) \
	(MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
	(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)

#define FN(reg_name, field) \
	FD(reg_name##__##field)
@ -84,16 +84,12 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
	int actual_dispclk_set_mhz = -1;
	struct dc *core_dc = clk_mgr->base.ctx->dc;
	struct dmcu *dmcu = core_dc->res_pool->dmcu;
	uint32_t clk = requested_dispclk_khz / 1000;

	if (clk <= 100)
		clk = 101;

	/* Unit of SMU msg parameter is Mhz */
	actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDispclkFreq,
			clk);
			requested_dispclk_khz / 1000);

	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
@ -124,7 +120,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
{
	int actual_dcfclk_set_mhz = -1;

	if (clk_mgr->smu_ver < 0xFFFFFFFF)
	if (clk_mgr->smu_ver < 0x370c00)
		return actual_dcfclk_set_mhz;

	actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param(
@ -139,7 +135,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int
{
	int actual_min_ds_dcfclk_mhz = -1;

	if (clk_mgr->smu_ver < 0xFFFFFFFF)
	if (clk_mgr->smu_ver < 0x370c00)
		return actual_min_ds_dcfclk_mhz;

	actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param(
@ -162,33 +158,35 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_
{
	int actual_dppclk_set_mhz = -1;

	uint32_t clk = requested_dpp_khz / 1000;

	if (clk <= 100)
		clk = 101;

	actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDppclkFreq,
			clk);
			requested_dpp_khz / 1000);

	return actual_dppclk_set_mhz * 1000;
}

void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count)
void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state)
{
	int disp_count;

	if (state == DCN_PWR_STATE_OPTIMIZED)
		disp_count = 0;
	else
		disp_count = 1;

	rn_vbios_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDisplayCount,
			display_count);
		clk_mgr,
		VBIOSSMC_MSG_SetDisplayCount,
		disp_count);
}

void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr)
void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
	rn_vbios_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
			0);
			enable);
}

void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)

@ -33,8 +33,8 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count);
void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr);
void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, int display_count);
void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);

#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */

@ -411,6 +411,27 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
	return false;
}

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates*/
	int i = 0;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
@ -918,15 +939,11 @@ static void program_timing_sync(

		/* set first pipe with plane as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->plane_state) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}
@ -983,6 +1000,10 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
		struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

@ -1002,6 +1023,9 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
@ -1017,10 +1041,46 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->is_matching_timing)
	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->is_matching_timing(tg, crtc_timing))
	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
@ -1033,6 +1093,20 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	return true;
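
/*
 * Illustrative sketch: instead of delegating to
 * tg->funcs->is_matching_timing(), the check above reads the live hardware
 * timing once via get_hw_timing() (and, for DP, dp_get_pixel_format()) and
 * compares every horizontal/vertical field, the pixel clock, the color
 * depth and the pixel encoding one by one; any mismatch disqualifies
 * seamless boot.
 */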
@ -1660,8 +1734,16 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0)
		dc->optimized_required = true;
	if (type == UPDATE_TYPE_FAST) {
		// If there's an available clock comparator, we use that.
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		// Else we fall back to a mem compare.
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}
	}

	return type;
}

@ -743,7 +743,8 @@ static bool wait_for_alt_mode(struct dc_link *link)
 * This does not create remote sinks but will trigger DM
 * to start MST detection if a branch is detected.
 */
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
static bool dc_link_detect_helper(struct dc_link *link,
				  enum dc_detect_reason reason)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
@ -759,6 +760,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
	bool same_dpcd = true;
	enum dc_connection_type new_connection_type = dc_connection_none;
	bool perform_dp_seamless_boot = false;

	DC_LOGGER_INIT(link->ctx->logger);

	if (dc_is_virtual_signal(link->connector_signal))
@ -871,7 +873,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
			 * empty which leads to allocate_mst_payload() has "0"
			 * pbn_per_slot value leading to exception on dc_fixpt_div()
			 */
			link->verified_link_cap = link->reported_link_cap;
			dp_verify_mst_link_cap(link);

			if (prev_sink != NULL)
				dc_sink_release(prev_sink);
			return false;
@ -1065,6 +1068,23 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
		dc_sink_release(prev_sink);

	return true;

}

bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
{
	const struct dc *dc = link->dc;
	bool ret;

	/* get out of low power state */
	clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);

	ret = dc_link_detect_helper(link, reason);

	/* Go back to power optimized state */
	clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);

	return ret;
}
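
/*
 * Illustrative sketch: dc_link_detect() is now a thin wrapper that brackets
 * the real detection with the clk_mgr power-state helpers introduced above:
 *
 *   clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);   (wake, park PSR)
 *   ret = dc_link_detect_helper(link, reason);
 *   clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);         (restore PSR state)
 */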
bool dc_link_get_hpd_state(struct dc_link *dc_link)
@ -1510,7 +1530,7 @@ static enum dc_status enable_link_dp(

	pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
			link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
	if (!apply_seamless_boot_optimization)
	if (state->clk_mgr && !apply_seamless_boot_optimization)
		state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);

	dp_enable_link_phy(
@ -2237,7 +2257,7 @@ static bool dp_active_dongle_validate_timing(
		break;
	}

	if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
	if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
		dongle_caps->extendedCapValid == false)
		return true;

@ -2401,13 +2421,17 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
	return true;
}

bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait)
{
	struct dc *core_dc = link->ctx->dc;
	struct dmcu *dmcu = core_dc->res_pool->dmcu;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled)
		dmcu->funcs->set_psr_enable(dmcu, enable, wait);


	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
		dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);

	link->psr_allow_active = allow_active;

	return true;
}
@ -2718,6 +2742,10 @@ void core_link_enable_stream(
	enum dc_status status;
	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);

	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
			dc_is_virtual_signal(pipe_ctx->stream->signal))
		return;

	if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
		stream->link->link_enc->funcs->setup(
			stream->link->link_enc,
@ -2860,6 +2888,10 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->sink->link;

	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
			dc_is_virtual_signal(pipe_ctx->stream->signal))
		return;

#if defined(CONFIG_DRM_AMD_DC_HDCP)
	update_psp_stream_config(pipe_ctx, true);
#endif

@ -634,6 +634,20 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
return dce_aux_transfer_with_retries(ddc, payload);
}


enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
uint32_t timeout)
{
enum dc_status status = DC_OK;
struct ddc *ddc_pin = ddc->ddc_pin;

if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL)
return DC_ERROR_UNEXPECTED;
if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout))
status = DC_ERROR_UNEXPECTED;
return status;
}

/*test only function*/
void dal_ddc_service_set_ddc_pin(
struct ddc_service *ddc_service,
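
dc_link_aux_configure_timeout() returns DC_ERROR_UNEXPECTED both when the engine on this DDC line has no configure_timeout hook and when the hook itself fails. A usage sketch; the 3200 us value is an assumption chosen to land in the largest timeout bucket of the dce_aux hunks further below:

enum dc_status status = dc_link_aux_configure_timeout(ddc, 3200);

if (status != DC_OK) {
	/* engine has no hook, or programming the timeout failed */
}
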
@ -1409,6 +1409,9 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
max_link_cap.link_rate = LINK_RATE_HIGH3;

if (link->link_enc->funcs->get_max_link_cap)
link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);

/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
max_link_cap.lane_count =
@ -1653,11 +1656,14 @@ bool dp_verify_link_cap_with_retries(

for (i = 0; i < attempts; i++) {
int fail_count = 0;
enum dc_connection_type type;
enum dc_connection_type type = dc_connection_none;

memset(&link->verified_link_cap, 0,
sizeof(struct dc_link_settings));
if (!dc_link_detect_sink(link, &type)) {
if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) {
link->verified_link_cap.lane_count = LANE_COUNT_ONE;
link->verified_link_cap.link_rate = LINK_RATE_LOW;
link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
break;
} else if (dp_verify_link_cap(link,
&link->reported_link_cap,
@ -1670,6 +1676,19 @@ bool dp_verify_link_cap_with_retries(
return success;
}

bool dp_verify_mst_link_cap(
struct dc_link *link)
{
struct dc_link_settings max_link_cap = {0};

max_link_cap = get_max_link_cap(link);
link->verified_link_cap = get_common_supported_link_settings(
link->reported_link_cap,
max_link_cap);

return true;
}

static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b)
@ -2057,11 +2076,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link)
return false;
}

static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
union dpcd_psr_configuration psr_configuration;

if (!link->psr_enabled)
if (!link->psr_feature_enabled)
return false;

dm_helpers_dp_read_dpcd(
@ -2100,8 +2119,8 @@ static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
sizeof(psr_error_status.raw));

/* PSR error, disable and re-enable PSR */
dc_link_set_psr_enable(link, false, true);
dc_link_set_psr_enable(link, true, true);
dc_link_set_psr_allow_active(link, false, true);
dc_link_set_psr_allow_active(link, true, true);

return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@ -2556,6 +2575,7 @@ static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
union dp_downstream_port_present ds_port = { .byte = data };
memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));

/* decode converter info*/
if (!ds_port.fields.PORT_PRESENT) {
@ -2702,6 +2722,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
* keep receiver powered all the time.*/
case DP_BRANCH_DEVICE_ID_0010FA:
case DP_BRANCH_DEVICE_ID_0080E1:
case DP_BRANCH_DEVICE_ID_00E04C:
link->wa_flags.dp_keep_receiver_powered = true;
break;

@ -423,10 +423,10 @@ bool dc_stream_add_writeback(struct dc *dc,

if (dwb->funcs->is_enabled(dwb)) {
/* writeback pipe already enabled, only need to update */
dc->hwss.update_writeback(dc, stream_status, wb_info);
dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
} else {
/* Enable writeback pipe from scratch*/
dc->hwss.enable_writeback(dc, stream_status, wb_info);
dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
}
}

@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"

#define DC_VER "3.2.51.1"
#define DC_VER "3.2.54"

#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -111,6 +111,7 @@ struct dc_caps {
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
bool extended_aux_timeout_support;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool hw_3d_lut;
#endif
@ -220,6 +221,7 @@ struct dc_config {
bool power_down_display_on_boot;
bool edp_not_connected;
bool forced_clocks;
bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
};

@ -245,6 +247,18 @@ enum wm_report_mode {
WM_REPORT_DEFAULT = 0,
WM_REPORT_OVERRIDE = 1,
};
enum dtm_pstate{
dtm_level_p0 = 0,/*highest voltage*/
dtm_level_p1,
dtm_level_p2,
dtm_level_p3,
dtm_level_p4,/*when active_display_count = 0*/
};

enum dcn_pwr_state {
DCN_PWR_STATE_OPTIMIZED = 0,
DCN_PWR_STATE_NORMAL = 1
};

/*
* For any clocks that may differ per pipe
@ -260,12 +274,13 @@ struct dc_clocks {
int phyclk_khz;
int dramclk_khz;
bool p_state_change_support;

enum dcn_pwr_state pwr_state;
/*
* Elements below are not compared for the purposes of
* optimization required
*/
bool prev_p_state_change_support;
enum dtm_pstate dtm_level;
int max_supported_dppclk_khz;
int max_supported_dispclk_khz;
int bw_dppclk_khz; /*a copy of dppclk_khz*/

@ -578,6 +578,11 @@ enum dc_quantization_range {
QUANTIZATION_RANGE_LIMITED
};

enum dc_dynamic_expansion {
DYN_EXPANSION_AUTO,
DYN_EXPANSION_DISABLE
};

/* XFM */

/* used in struct dc_plane_state */

@ -126,7 +126,8 @@ struct dc_link {
unsigned short chip_caps;
unsigned int dpcd_sink_count;
enum edp_revision edp_revision;
bool psr_enabled;
bool psr_feature_enabled;
bool psr_allow_active;

/* MST record stream using this link */
struct link_flags {
@ -158,6 +159,18 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
return dc->links[link_index];
}

static inline struct dc_link *get_edp_link(const struct dc *dc)
{
int i;

// report any eDP links, even unconnected DDI's
for (i = 0; i < dc->link_count; i++) {
if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
return dc->links[i];
}
return NULL;
}

/* Set backlight level of an embedded panel (eDP, LVDS).
* backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
* and 16 bit fractional, where 1.0 is max backlight value.
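
The get_edp_link() inline added above centralizes a loop that several hwseq files carried privately; the dce110 copy is deleted in a later hunk. A trivial usage sketch, illustrative only:

struct dc_link *edp_link = get_edp_link(dc);

if (edp_link) {
	/* power-sequence the embedded panel, e.g. during boot */
}
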
@ -170,7 +183,7 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link);

bool dc_link_set_abm_disable(const struct dc_link *dc_link);

bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);

bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);

@ -451,6 +451,9 @@ void dc_stream_set_static_screen_events(struct dc *dc,
int num_streams,
const struct dc_static_screen_events *events);

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
enum dc_dynamic_expansion option);

void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option);

@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);

REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
1, 80000);

return true;
}

@ -42,6 +42,10 @@

#include "reg_helper.h"

#undef FN
#define FN(reg_name, field_name) \
aux110->shift->field_name, aux110->mask->field_name

#define FROM_AUX_ENGINE(ptr) \
container_of((ptr), struct aux_engine_dce110, base)

@ -55,6 +59,14 @@ enum {
AUX_TIMED_OUT_RETRY_COUNTER = 2,
AUX_DEFER_RETRY_COUNTER = 6
};

#define TIME_OUT_INCREMENT 1016
#define TIME_OUT_MULTIPLIER_8 8
#define TIME_OUT_MULTIPLIER_16 16
#define TIME_OUT_MULTIPLIER_32 32
#define TIME_OUT_MULTIPLIER_64 64
#define MAX_TIMEOUT_LENGTH 127

static void release_engine(
struct dce_aux *engine)
{
@ -198,7 +210,7 @@ static void submit_channel_request(
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);

REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
10, aux110->timeout_period/10);
10, aux110->polling_timeout_period/10);

/* set the delay and the number of bytes to write */

@ -327,7 +339,7 @@ static enum aux_channel_operation_result get_channel_status(

/* poll to make sure that SW_DONE is asserted */
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
10, aux110->timeout_period/10);
10, aux110->polling_timeout_period/10);

value = REG_READ(AUX_SW_STATUS);
/* in case HPD is LOW, exit AUX transaction */
@ -414,20 +426,82 @@ void dce110_engine_destroy(struct dce_aux **engine)
*engine = NULL;

}

static bool dce_aux_configure_timeout(struct ddc_service *ddc,
uint32_t timeout_in_us)
{
uint32_t multiplier = 0;
uint32_t length = 0;
uint32_t timeout = 0;
struct ddc *ddc_pin = ddc->ddc_pin;
struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);

/* 1-Update polling timeout period */
aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER;

/* 2-Update aux timeout period length and multiplier */
if (timeout_in_us <= TIME_OUT_INCREMENT) {
multiplier = 0;
length = timeout_in_us/TIME_OUT_MULTIPLIER_8;
if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0)
length++;
timeout = length * TIME_OUT_MULTIPLIER_8;
} else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) {
multiplier = 1;
length = timeout_in_us/TIME_OUT_MULTIPLIER_16;
if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0)
length++;
timeout = length * TIME_OUT_MULTIPLIER_16;
} else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) {
multiplier = 2;
length = timeout_in_us/TIME_OUT_MULTIPLIER_32;
if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0)
length++;
timeout = length * TIME_OUT_MULTIPLIER_32;
} else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) {
multiplier = 3;
length = timeout_in_us/TIME_OUT_MULTIPLIER_64;
if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0)
length++;
timeout = length * TIME_OUT_MULTIPLIER_64;
}

length = (length < MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH;

REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier);

return true;
}
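
The bucket selection above rounds the requested timeout up to the granularity of the chosen multiplier and clamps the length field at MAX_TIMEOUT_LENGTH. Worked example with the hunk's constants: 1600 us falls in the second bucket (1016 < 1600 <= 2032), so the multiplier code is 1, length is ceil(1600/16) = 100, and the programmed timeout is 100 * 16 = 1600 us. A reference-only restatement of the granularity choice, not part of the patch:

/* Reference-only restatement of the granularity selection. */
static uint32_t aux_timeout_granularity_us(uint32_t timeout_in_us)
{
	if (timeout_in_us <= TIME_OUT_INCREMENT)
		return TIME_OUT_MULTIPLIER_8;   /* multiplier code 0 */
	else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT)
		return TIME_OUT_MULTIPLIER_16;  /* multiplier code 1 */
	else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT)
		return TIME_OUT_MULTIPLIER_32;  /* multiplier code 2 */
	return TIME_OUT_MULTIPLIER_64;          /* multiplier code 3 */
}
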

static struct dce_aux_funcs aux_functions = {
.configure_timeout = NULL,
.destroy = NULL,
};

struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
const struct dce110_aux_registers *regs)
const struct dce110_aux_registers *regs,
const struct dce110_aux_registers_mask *mask,
const struct dce110_aux_registers_shift *shift,
bool is_ext_aux_timeout_configurable)
{
aux_engine110->base.ddc = NULL;
aux_engine110->base.ctx = ctx;
aux_engine110->base.delay = 0;
aux_engine110->base.max_defer_write_retry = 0;
aux_engine110->base.inst = inst;
aux_engine110->timeout_period = timeout_period;
aux_engine110->polling_timeout_period = timeout_period;
aux_engine110->regs = regs;

aux_engine110->mask = mask;
aux_engine110->shift = shift;
aux_engine110->base.funcs = &aux_functions;
if (is_ext_aux_timeout_configurable)
aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout;

return &aux_engine110->base;
}

@ -475,7 +549,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
aux_req.action = i2caux_action_from_payload(payload);

aux_req.address = payload->address;
aux_req.delay = payload->defer_delay * 10;
aux_req.delay = 0;
aux_req.length = payload->length;
aux_req.data = payload->data;

@ -544,8 +618,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_TRANSACTION_REPLY_AUX_DEFER:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
} else {
if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
(*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
if (payload->defer_delay > 0)
msleep(payload->defer_delay);
}
}
break;

case AUX_TRANSACTION_REPLY_I2C_DEFER:
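
The new defer handling sleeps only for genuine AUX or I2C-over-AUX defer replies, honoring the sink-provided defer_delay, while an I2C-over-AUX NACK still just consumes a retry. A simplified skeleton of the resulting behavior; a hedged sketch whose surrounding loop and fail label follow the hunk above:

/* Simplified: on a defer reply, back off for the sink's defer_delay
 * before the next attempt; give up after AUX_MAX_DEFER_RETRIES. */
if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
	goto fail;
if (*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER &&
    payload->defer_delay > 0)
	msleep(payload->defer_delay);
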
@ -29,6 +29,7 @@
#include "i2caux_interface.h"
#include "inc/hw/aux_engine.h"


#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define AUX_COMMON_REG_LIST0(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
@ -36,6 +37,7 @@
SRI(AUX_SW_DATA, DP_AUX, id), \
SRI(AUX_SW_CONTROL, DP_AUX, id), \
SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
SRI(AUX_SW_STATUS, DP_AUX, id)
#endif

@ -55,6 +57,7 @@ struct dce110_aux_registers {
uint32_t AUX_SW_DATA;
uint32_t AUX_SW_CONTROL;
uint32_t AUX_INTERRUPT_CONTROL;
uint32_t AUX_DPHY_RX_CONTROL1;
uint32_t AUX_SW_STATUS;
uint32_t AUXN_IMPCAL;
uint32_t AUXP_IMPCAL;
@ -62,6 +65,156 @@ struct dce110_aux_registers {
uint32_t AUX_RESET_MASK;
};

#define DCE_AUX_REG_FIELD_LIST(type)\
type AUX_EN;\
type AUX_RESET;\
type AUX_RESET_DONE;\
type AUX_REG_RW_CNTL_STATUS;\
type AUX_SW_USE_AUX_REG_REQ;\
type AUX_SW_DONE_USING_AUX_REG;\
type AUX_SW_AUTOINCREMENT_DISABLE;\
type AUX_SW_DATA_RW;\
type AUX_SW_INDEX;\
type AUX_SW_GO;\
type AUX_SW_DATA;\
type AUX_SW_REPLY_BYTE_COUNT;\
type AUX_SW_DONE;\
type AUX_SW_DONE_ACK;\
type AUXN_IMPCAL_ENABLE;\
type AUXP_IMPCAL_ENABLE;\
type AUXN_IMPCAL_OVERRIDE_ENABLE;\
type AUXP_IMPCAL_OVERRIDE_ENABLE;\
type AUX_RX_TIMEOUT_LEN;\
type AUX_RX_TIMEOUT_LEN_MUL;\
type AUXN_CALOUT_ERROR_AK;\
type AUXP_CALOUT_ERROR_AK;\
type AUX_SW_START_DELAY;\
type AUX_SW_WR_BYTES

#define DCE10_AUX_MASK_SH_LIST(mask_sh)\
AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)

#define DCE_AUX_MASK_SH_LIST(mask_sh)\
AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh),\
AUX_SF(AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)

#define DCE12_AUX_MASK_SH_LIST(mask_sh)\
AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)

/* DCN10 MASK */
#define DCN10_AUX_MASK_SH_LIST(mask_sh)\
AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)

/* for all other DCN */
#define DCN_AUX_MASK_SH_LIST(mask_sh)\
AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)

#define AUX_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
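
AUX_SF token-pastes the register name, field name, and suffix into the conventional __SHIFT/_MASK identifiers, so each per-ASIC list above expands into designated initializers for the mask and shift structs. For example:

/* AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, __SHIFT) expands to:
 *	.AUX_EN = DP_AUX0_AUX_CONTROL__AUX_EN__SHIFT
 * and AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, _MASK) to:
 *	.AUX_EN = DP_AUX0_AUX_CONTROL__AUX_EN_MASK
 */
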

enum { /* This is the timeout as defined in DP 1.2a,
* 2.3.4 "Detailed uPacket TX AUX CH State Description".
*/
@ -97,20 +250,34 @@ struct dce_aux {
uint32_t max_defer_write_retry;

bool acquire_reset;
struct dce_aux_funcs *funcs;
};

struct dce110_aux_registers_mask {
DCE_AUX_REG_FIELD_LIST(uint32_t);
};

struct dce110_aux_registers_shift {
DCE_AUX_REG_FIELD_LIST(uint8_t);
};


struct aux_engine_dce110 {
struct dce_aux base;
const struct dce110_aux_registers *regs;
const struct dce110_aux_registers_mask *mask;
const struct dce110_aux_registers_shift *shift;
struct {
uint32_t aux_control;
uint32_t aux_arb_control;
uint32_t aux_sw_data;
uint32_t aux_sw_control;
uint32_t aux_interrupt_control;
uint32_t aux_dphy_rx_control1;
uint32_t aux_dphy_rx_control0;
uint32_t aux_sw_status;
} addr;
uint32_t timeout_period;
uint32_t polling_timeout_period;
};

struct aux_engine_dce110_init_data {
@ -120,12 +287,15 @@ struct aux_engine_dce110_init_data {
const struct dce110_aux_registers *regs;
};

struct dce_aux *dce110_aux_engine_construct(
struct aux_engine_dce110 *aux_engine110,
struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
const struct dce110_aux_registers *regs);
const struct dce110_aux_registers *regs,

const struct dce110_aux_registers_mask *mask,
const struct dce110_aux_registers_shift *shift,
bool is_ext_aux_timeout_configurable);

void dce110_engine_destroy(struct dce_aux **engine);

@ -139,4 +309,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,

bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);

struct dce_aux_funcs {
bool (*configure_timeout)
(struct ddc_service *ddc,
uint32_t timeout);
void (*destroy)
(struct aux_engine **ptr);
};

#endif

@ -679,6 +679,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
#endif

@ -506,6 +506,14 @@ static const struct dce_mem_input_mask mi_masks = {
.ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
};

static const struct dce110_aux_registers_shift aux_shift = {
DCE10_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
DCE10_AUX_MASK_SH_LIST(_MASK)
};

static struct mem_input *dce100_mem_input_create(
struct dc_context *ctx,
uint32_t inst)
@ -611,7 +619,10 @@ struct dce_aux *dce100_aux_engine_create(

dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);

return &aux_engine->base;
}
@ -997,6 +1008,8 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.disable_dp_clk_share = true;
dc->caps.extended_aux_timeout_support = false;

for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
dce100_timing_generator_create(

@ -1161,8 +1161,9 @@ static void build_audio_output(
}
}

if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
if (state->clk_mgr &&
(pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
state->clk_mgr->funcs->get_dp_ref_clk_frequency(
state->clk_mgr);
@ -1410,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(

pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;

pipe_ctx->stream->link->psr_enabled = false;
pipe_ctx->stream->link->psr_feature_enabled = false;

return DC_OK;
}
@ -1521,18 +1522,6 @@ static struct dc_stream_state *get_edp_stream(struct dc_state *context)
return NULL;
}

static struct dc_link *get_edp_link(struct dc *dc)
{
int i;

// report any eDP links, even unconnected DDI's
for (i = 0; i < dc->link_count; i++) {
if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
return dc->links[i];
}
return NULL;
}

static struct dc_link *get_edp_link_with_sink(
struct dc *dc,
struct dc_state *context)
@ -1826,7 +1815,7 @@ static bool should_enable_fbc(struct dc *dc,
return false;

/* PSR should not be enabled */
if (pipe_ctx->stream->link->psr_enabled)
if (pipe_ctx->stream->link->psr_feature_enabled)
return false;

/* Nothing to compress */
@ -275,6 +275,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
};

static const struct dce110_aux_registers_shift aux_shift = {
DCE_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
DCE_AUX_MASK_SH_LIST(_MASK)
};

#define opp_regs(id)\
[id] = {\
OPP_DCE_110_REG_LIST(id),\
@ -657,7 +665,10 @@ struct dce_aux *dce110_aux_engine_create(

dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);

return &aux_engine->base;
}
@ -1293,6 +1304,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.is_apu = true;
dc->caps.extended_aux_timeout_support = false;

/*************************************************
* Create resources *

@ -172,6 +172,14 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCE110(_MASK)
};

static const struct dce110_aux_registers_shift aux_shift = {
DCE_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
DCE_AUX_MASK_SH_LIST(_MASK)
};

#define ipp_regs(id)\
[id] = {\
IPP_DCE110_REG_LIST_DCE_BASE(id)\
@ -630,7 +638,10 @@ struct dce_aux *dce112_aux_engine_create(

dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);

return &aux_engine->base;
}
@ -1163,7 +1174,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;

dc->caps.extended_aux_timeout_support = false;

/*************************************************
* Create resources *

@ -293,6 +293,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
};

static const struct dce110_aux_registers_shift aux_shift = {
DCE12_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
DCE12_AUX_MASK_SH_LIST(_MASK)
};

#define opp_regs(id)\
[id] = {\
OPP_DCE_120_REG_LIST(id),\
@ -404,7 +412,10 @@ struct dce_aux *dce120_aux_engine_create(

dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);

return &aux_engine->base;
}
@ -1006,7 +1017,7 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.psp_setup_panel_mode = true;

dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;

/*************************************************

@ -288,6 +288,14 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};

static const struct dce110_aux_registers_shift aux_shift = {
DCE10_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
DCE10_AUX_MASK_SH_LIST(_MASK)
};

#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
@ -491,7 +499,10 @@ struct dce_aux *dce80_aux_engine_create(

dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);

return &aux_engine->base;
}
@ -895,6 +906,7 @@ static bool dce80_construct(
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;

/*************************************************
* Create resources *
@ -129,7 +129,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)

#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))

static bool dpp_get_optimal_number_of_taps(
bool dpp1_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
@ -521,7 +521,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
.dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
.dpp_set_csc_default = dpp1_cm_set_output_csc_default,

@ -1504,6 +1504,11 @@ void dpp1_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);

bool dpp1_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps);

void dpp1_construct(struct dcn10_dpp *dpp1,
struct dc_context *ctx,
uint32_t inst,

@ -670,6 +670,10 @@ static void dcn10_bios_golden_init(struct dc *dc)
int i;
bool allow_self_fresh_force_enable = true;

#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc))
return;
#endif
if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
allow_self_fresh_force_enable =
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
@ -1452,15 +1456,15 @@ static void log_tf(struct dc_context *ctx,
DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

for (i = 0; i < hw_points_num; i++) {
DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}

for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
}

@ -2511,8 +2515,10 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
if (dc->hwss.setup_vupdate_interrupt)
dc->hwss.setup_vupdate_interrupt(pipe_ctx);

dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
}

if (pipe_ctx->plane_state != NULL)
@ -113,6 +113,20 @@ struct dcn10_link_enc_registers {
uint32_t DIG_LANE_ENABLE;
/* UNIPHY */
uint32_t CHANNEL_XBAR_CNTL;
/* DPCS */
uint32_t RDPCSTX_PHY_CNTL3;
uint32_t RDPCSTX_PHY_CNTL4;
uint32_t RDPCSTX_PHY_CNTL5;
uint32_t RDPCSTX_PHY_CNTL6;
uint32_t RDPCSTX_PHY_CNTL7;
uint32_t RDPCSTX_PHY_CNTL8;
uint32_t RDPCSTX_PHY_CNTL9;
uint32_t RDPCSTX_PHY_CNTL10;
uint32_t RDPCSTX_PHY_CNTL11;
uint32_t RDPCSTX_PHY_CNTL12;
uint32_t RDPCSTX_PHY_CNTL13;
uint32_t RDPCSTX_PHY_CNTL14;
uint32_t RDPCSTX_PHY_CNTL15;
/* indirect registers */
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3;
@ -250,6 +264,10 @@ struct dcn10_link_enc_registers {
type RDPCS_EXT_REFCLK_EN;\
type RDPCS_TX_FIFO_EN;\
type UNIPHY_LINK_ENABLE;\
type UNIPHY_CHANNEL0_XBAR_SOURCE;\
type UNIPHY_CHANNEL1_XBAR_SOURCE;\
type UNIPHY_CHANNEL2_XBAR_SOURCE;\
type UNIPHY_CHANNEL3_XBAR_SOURCE;\
type UNIPHY_CHANNEL0_INVERT;\
type UNIPHY_CHANNEL1_INVERT;\
type UNIPHY_CHANNEL2_INVERT;\
@ -337,16 +355,46 @@ struct dcn10_link_enc_registers {
type RDPCS_TX_FIFO_ERROR_MASK;\
type RDPCS_DPALT_DISABLE_TOGGLE_MASK;\
type RDPCS_DPALT_4LANE_TOGGLE_MASK;\
type RDPCS_PHY_DPALT_DP4;\
type RDPCS_PHY_DPALT_DISABLE;\
type RDPCS_PHY_DPALT_DISABLE_ACK;\
type RDPCS_PHY_DP_MPLLB_V2I;\
type RDPCS_PHY_DP_MPLLB_FREQ_VCO;\
type RDPCS_PHY_DP_MPLLB_CP_INT_GS;\
type RDPCS_PHY_RX_VREF_CTRL;\
type RDPCS_PHY_DP_MPLLB_CP_INT;\
type RDPCS_PHY_DP_MPLLB_CP_PROP;\
type RDPCS_PHY_RX_REF_LD_VAL;\
type RDPCS_PHY_RX_VCO_LD_VAL;\
type DPCSTX_DEBUG_CONFIG; \
type RDPCSTX_DEBUG_CONFIG
type RDPCSTX_DEBUG_CONFIG; \
type RDPCS_PHY_DP_TX0_EQ_MAIN;\
type RDPCS_PHY_DP_TX0_EQ_PRE;\
type RDPCS_PHY_DP_TX0_EQ_POST;\
type RDPCS_PHY_DP_TX1_EQ_MAIN;\
type RDPCS_PHY_DP_TX1_EQ_PRE;\
type RDPCS_PHY_DP_TX1_EQ_POST;\
type RDPCS_PHY_DP_TX2_EQ_MAIN;\
type RDPCS_PHY_DP_MPLLB_CP_PROP_GS;\
type RDPCS_PHY_DP_TX2_EQ_PRE;\
type RDPCS_PHY_DP_TX2_EQ_POST;\
type RDPCS_PHY_DP_TX3_EQ_MAIN;\
type RDPCS_PHY_DCO_RANGE;\
type RDPCS_PHY_DCO_FINETUNE;\
type RDPCS_PHY_DP_TX3_EQ_PRE;\
type RDPCS_PHY_DP_TX3_EQ_POST;\
type RDPCS_PHY_SUP_PRE_HP;\
type RDPCS_PHY_DP_TX0_VREGDRV_BYP;\
type RDPCS_PHY_DP_TX1_VREGDRV_BYP;\
type RDPCS_PHY_DP_TX2_VREGDRV_BYP;\
type RDPCS_PHY_DP_TX3_VREGDRV_BYP;\
type RDPCS_DMCU_DPALT_DIS_BLOCK_REG;\
type UNIPHYA_SOFT_RESET;\
type UNIPHYB_SOFT_RESET;\
type UNIPHYC_SOFT_RESET;\
type UNIPHYD_SOFT_RESET;\
type UNIPHYE_SOFT_RESET;\
type UNIPHYF_SOFT_RESET

#define DCN20_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_LANE0EN;\

@ -240,6 +240,9 @@ void opp1_set_dyn_expansion(
FMT_DYNAMIC_EXP_EN, 0,
FMT_DYNAMIC_EXP_MODE, 0);

if (opp->dyn_expansion == DYN_EXPANSION_DISABLE)
return;

/*00 - 10-bit -> 12-bit dynamic expansion*/
/*01 - 8-bit -> 12-bit dynamic expansion*/
if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||

@ -1230,59 +1230,25 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc)
return ret;
}

bool optc1_is_matching_timing(struct timing_generator *tg,
const struct dc_crtc_timing *otg_timing)
bool optc1_get_hw_timing(struct timing_generator *tg,
struct dc_crtc_timing *hw_crtc_timing)
{
struct dc_crtc_timing hw_crtc_timing = {0};
struct dcn_otg_state s = {0};

if (tg == NULL || otg_timing == NULL)
if (tg == NULL || hw_crtc_timing == NULL)
return false;

optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

hw_crtc_timing.h_total = s.h_total + 1;
hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start;
hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
hw_crtc_timing->h_total = s.h_total + 1;
hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start;
hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start;

hw_crtc_timing.v_total = s.v_total + 1;
hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
hw_crtc_timing.v_front_porch = s.v_total + 1 - s.v_blank_start;
hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start;

if (otg_timing->h_total != hw_crtc_timing.h_total)
return false;

if (otg_timing->h_border_left != hw_crtc_timing.h_border_left)
return false;

if (otg_timing->h_addressable != hw_crtc_timing.h_addressable)
return false;

if (otg_timing->h_border_right != hw_crtc_timing.h_border_right)
return false;

if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch)
return false;

if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width)
return false;

if (otg_timing->v_total != hw_crtc_timing.v_total)
return false;

if (otg_timing->v_border_top != hw_crtc_timing.v_border_top)
return false;

if (otg_timing->v_addressable != hw_crtc_timing.v_addressable)
return false;

if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
return false;

if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width)
return false;
hw_crtc_timing->v_total = s.v_total + 1;
hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start;
hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start;

return true;
}
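
With the field-by-field comparison dropped from OPTC, a caller that needs the old is_matching_timing() behavior reads back the hardware timing and compares it against the stream timing itself. A hedged sketch; the surrounding variables are assumptions:

struct dc_crtc_timing hw_timing = {0};
bool matching = false;

if (tg->funcs->get_hw_timing && tg->funcs->get_hw_timing(tg, &hw_timing)) {
	/* compare the same fields the removed code checked */
	matching = hw_timing.h_total == stream->timing.h_total &&
		   hw_timing.v_total == stream->timing.v_total;
}
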
@ -1486,7 +1452,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
.is_matching_timing = optc1_is_matching_timing,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not need for FPGA */
.wait_for_state = optc1_wait_for_state,
@ -1514,7 +1479,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.configure_crc = optc1_configure_crc,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc1_program_manual_trigger,
.setup_manual_trigger = optc1_setup_manual_trigger
.setup_manual_trigger = optc1_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
};

void dcn10_timing_generator_init(struct optc *optc1)

@ -547,9 +547,8 @@ struct dcn_otg_state {
void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s);

bool optc1_is_matching_timing(
struct timing_generator *tg,
const struct dc_crtc_timing *otg_timing);
bool optc1_get_hw_timing(struct timing_generator *tg,
struct dc_crtc_timing *hw_crtc_timing);

bool optc1_validate_timing(
struct timing_generator *optc,

@ -319,6 +319,14 @@ static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};

static const struct dce110_aux_registers_shift aux_shift = {
DCN10_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
DCN10_AUX_MASK_SH_LIST(_MASK)
};

#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN10(id),\
@ -642,7 +650,10 @@ struct dce_aux *dcn10_aux_engine_create(

dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);

return &aux_engine->base;
}
@ -1308,6 +1319,8 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
dc->caps.extended_aux_timeout_support = false;

/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
dc->caps.force_dp_tps4_for_cp2520 = true;

@ -1553,6 +1553,66 @@ unsigned int enc1_dig_source_otg(
return tg_inst;
}

bool enc1_stream_encoder_dp_get_pixel_format(
struct stream_encoder *enc,
enum dc_pixel_encoding *encoding,
enum dc_color_depth *depth)
{
uint32_t hw_encoding = 0;
uint32_t hw_depth = 0;
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

if (enc == NULL ||
encoding == NULL ||
depth == NULL)
return false;

REG_GET_2(DP_PIXEL_FORMAT,
DP_PIXEL_ENCODING, &hw_encoding,
DP_COMPONENT_DEPTH, &hw_depth);

switch (hw_depth) {
case DP_COMPONENT_PIXEL_DEPTH_6BPC:
*depth = COLOR_DEPTH_666;
break;
case DP_COMPONENT_PIXEL_DEPTH_8BPC:
*depth = COLOR_DEPTH_888;
break;
case DP_COMPONENT_PIXEL_DEPTH_10BPC:
*depth = COLOR_DEPTH_101010;
break;
case DP_COMPONENT_PIXEL_DEPTH_12BPC:
*depth = COLOR_DEPTH_121212;
break;
case DP_COMPONENT_PIXEL_DEPTH_16BPC:
*depth = COLOR_DEPTH_161616;
break;
default:
*depth = COLOR_DEPTH_UNDEFINED;
break;
}

switch (hw_encoding) {
case DP_PIXEL_ENCODING_TYPE_RGB444:
*encoding = PIXEL_ENCODING_RGB;
break;
case DP_PIXEL_ENCODING_TYPE_YCBCR422:
*encoding = PIXEL_ENCODING_YCBCR422;
break;
case DP_PIXEL_ENCODING_TYPE_YCBCR444:
case DP_PIXEL_ENCODING_TYPE_Y_ONLY:
*encoding = PIXEL_ENCODING_YCBCR444;
break;
case DP_PIXEL_ENCODING_TYPE_YCBCR420:
*encoding = PIXEL_ENCODING_YCBCR420;
break;
default:
*encoding = PIXEL_ENCODING_UNDEFINED;
break;
}
return true;
}
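
The read-back hook above lets callers confirm that the encoder's programmed format matches a stream, for example when validating a seamless-boot configuration. Illustrative sketch; the surrounding variables are assumptions:

enum dc_pixel_encoding hw_encoding;
enum dc_color_depth hw_depth;

if (enc->funcs->dp_get_pixel_format &&
    enc->funcs->dp_get_pixel_format(enc, &hw_encoding, &hw_depth) &&
    hw_encoding == stream->timing.pixel_encoding &&
    hw_depth == stream->timing.display_color_depth) {
	/* hardware already matches the stream */
}
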

static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dp_set_stream_attribute =
enc1_stream_encoder_dp_set_stream_attribute,
@ -1589,6 +1649,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
.dig_source_otg = enc1_dig_source_otg,

.dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,
};

void dcn10_stream_encoder_construct(

@ -621,4 +621,9 @@ void get_audio_clock_info(
void enc1_reset_hdmi_stream_attribute(
struct stream_encoder *enc);

bool enc1_stream_encoder_dp_get_pixel_format(
struct stream_encoder *enc,
enum dc_pixel_encoding *encoding,
enum dc_color_depth *depth);

#endif /* __DC_STREAM_ENCODER_DCN10_H__ */

@ -457,7 +457,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_read_state = dpp20_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps,
.dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,

@ -705,11 +705,6 @@ void dpp2_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);

bool dpp2_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps);

bool dpp2_construct(struct dcn20_dpp *dpp2,
struct dc_context *ctx,
uint32_t inst,

@ -1370,6 +1370,9 @@ static void dcn20_program_pipe(

pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

if (dc->hwss.setup_vupdate_interrupt)
dc->hwss.setup_vupdate_interrupt(pipe_ctx);
}

if (pipe_ctx->update_flags.bits.odm)
@ -1396,6 +1399,26 @@ static void dcn20_program_pipe(
*/
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);

/* If the pipe has been enabled or has a different opp, we
* should reprogram the fmt. This deals with cases where
* interaction between mpc and odm combine on different streams
* causes a different pipe to be chosen to odm combine with.
*/
if (pipe_ctx->update_flags.bits.enable
|| pipe_ctx->update_flags.bits.opp_changed) {

pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
pipe_ctx->stream_res.opp,
COLOR_SPACE_YCBCR601,
pipe_ctx->stream->timing.display_color_depth,
pipe_ctx->stream->signal);

pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
pipe_ctx->stream_res.opp,
&pipe_ctx->stream->bit_depth_params,
&pipe_ctx->stream->clamping);
}
}

static bool does_pipe_need_lock(struct pipe_ctx *pipe)
@ -1510,6 +1533,10 @@ static void dcn20_program_front_end_for_ctx(
msleep(1);
}
}

/* WA to apply WM setting*/
if (dc->hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
}


@ -1581,8 +1608,12 @@ bool dcn20_update_bandwidth(

pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

if (pipe_ctx->prev_odm_pipe == NULL)
dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);

if (dc->hwss.setup_vupdate_interrupt)
dc->hwss.setup_vupdate_interrupt(pipe_ctx);
}

pipe_ctx->plane_res.hubp->funcs->hubp_setup(
@ -1599,7 +1630,8 @@ bool dcn20_update_bandwidth(
static void dcn20_enable_writeback(
struct dc *dc,
const struct dc_stream_status *stream_status,
struct dc_writeback_info *wb_info)
struct dc_writeback_info *wb_info,
struct dc_state *context)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
@ -1616,7 +1648,7 @@ static void dcn20_enable_writeback(
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
mcif_wb->funcs->config_mcif_arb(mcif_wb, &dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
/* Enable MCIF_WB */
mcif_wb->funcs->enable_mcif(mcif_wb);
/* Enable DWB */
@ -2181,8 +2213,10 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);

if (link->dc->hwss.program_dmdata_engine)
link->dc->hwss.program_dmdata_engine(pipe_ctx);
if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
if (link->dc->hwss.program_dmdata_engine)
link->dc->hwss.program_dmdata_engine(pipe_ctx);
}

link->dc->hwss.update_info_frame(pipe_ctx);


@ -91,6 +91,13 @@ struct mpll_cfg {
uint32_t ref_range;
uint32_t ref_clk;
bool hdmimode_enable;
bool sup_pre_hp;
bool dp_tx0_vergdrv_byp;
bool dp_tx1_vergdrv_byp;
bool dp_tx2_vergdrv_byp;
bool dp_tx3_vergdrv_byp;


};

struct dpcssys_phy_seq_cfg {

@ -460,7 +460,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
.is_matching_timing = optc1_is_matching_timing
.get_hw_timing = optc1_get_hw_timing,
};

void dcn20_timing_generator_init(struct optc *optc1)

@ -581,11 +581,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {
};

static const struct dcn2_dpp_shift tf_shift = {
TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
TF_DEBUG_REG_LIST_SH_DCN10
};

static const struct dcn2_dpp_mask tf_mask = {
TF_REG_LIST_SH_MASK_DCN20(_MASK)
TF_REG_LIST_SH_MASK_DCN20(_MASK),
TF_DEBUG_REG_LIST_MASK_DCN10
};

#define dwbc_regs_dcn2(id)\
@ -732,6 +734,15 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};

static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};


#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define dsc_regsDCN20(id)\
[id] = {\
@ -922,7 +933,10 @@ struct dce_aux *dcn20_aux_engine_create(

dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);

return &aux_engine->base;
}
@ -1154,6 +1168,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn20_hwseq_create,
};

static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);

void dcn20_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
@ -1892,7 +1908,7 @@ int dcn20_populate_dml_pipes_from_context(
break;
case PIXEL_ENCODING_YCBCR420:
pipes[pipe_cnt].dout.output_format = dm_420;
pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
break;
case PIXEL_ENCODING_YCBCR422:
if (true) /* todo */
@ -1906,6 +1922,11 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
}

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
#endif

/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
/*
@ -2202,7 +2223,8 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
|
||||
*/
|
||||
if (secondary_pipe == NULL) {
|
||||
for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
|
||||
if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) {
|
||||
if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
|
||||
&& dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
|
||||
preferred_pipe_idx = j;
|
||||
|
||||
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
|
||||
@ -2555,6 +2577,10 @@ void dcn20_calculate_wm(
|
||||
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
|
||||
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
#endif
|
||||
|
||||
if (vlevel < 2) {
|
||||
pipes[0].clks_cfg.voltage = 2;
|
||||
@ -2566,6 +2592,10 @@ void dcn20_calculate_wm(
|
||||
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
|
||||
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
#endif
|
||||
|
||||
if (vlevel < 3) {
|
||||
pipes[0].clks_cfg.voltage = 3;
|
||||
@ -2577,6 +2607,10 @@ void dcn20_calculate_wm(
|
||||
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
|
||||
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
#endif
|
||||
|
||||
pipes[0].clks_cfg.voltage = vlevel;
|
||||
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
|
||||
@ -2586,6 +2620,10 @@ void dcn20_calculate_wm(
|
||||
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
|
||||
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
|
||||
#endif
|
||||
}
|
||||
|
||||
void dcn20_calculate_dlg_params(
|
||||
@ -2922,7 +2960,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
|
||||
return true;
|
||||
}
|
||||
|
||||
struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
|
||||
static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
|
||||
{
|
||||
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
|
||||
|
||||
@ -2937,7 +2975,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
|
||||
return pp_smu;
|
||||
}
|
||||
|
||||
void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
|
||||
static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
|
||||
{
|
||||
if (pp_smu && *pp_smu) {
|
||||
kfree(*pp_smu);
|
||||
@ -3322,6 +3360,7 @@ static bool construct(
|
||||
dc->caps.post_blend_color_processing = true;
|
||||
dc->caps.force_dp_tps4_for_cp2520 = true;
|
||||
dc->caps.hw_3d_lut = true;
|
||||
dc->caps.extended_aux_timeout_support = true;
|
||||
|
||||
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
|
||||
dc->debug = debug_defaults_drv;
|
||||
|
@ -95,9 +95,6 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst);
void dcn20_dsc_destroy(struct display_stream_compressor **dsc);

struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx);
void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);

struct hubp *dcn20_hubp_create(
struct dc_context *ctx,
uint32_t inst);
|
@ -578,6 +578,10 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.set_avmute = enc1_stream_encoder_set_avmute,
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.dig_source_otg = enc1_dig_source_otg,

.dp_get_pixel_format =
enc1_stream_encoder_dp_get_pixel_format,

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.enc_read_state = enc2_read_state,
.dp_set_dsc_config = enc2_dp_set_dsc_config,
|
@ -1,7 +1,7 @@
#
# Makefile for DCN21.

DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o

ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
cc_stack_align := -mpreferred-stack-boundary=4
|
@ -22,6 +22,7 @@
* Authors: AMD
*
*/
#include <linux/delay.h>
#include "dm_services.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn21_hubbub.h"
@ -51,7 +52,7 @@
#ifdef NUM_VMID
#undef NUM_VMID
#endif
#define NUM_VMID 1
#define NUM_VMID 16

static uint32_t convert_and_clamp(
uint32_t wm_ns,
@ -71,56 +72,76 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t riommu_active;
int i;

//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);

//Poll until RIOMMU_ACTIVE = 1
//TODO: Figure out interval us and retry count
REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100);
for (i = 0; i < 100; i++) {
REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);

//Reflect the power status of DCHUBBUB
REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
if (riommu_active)
break;
else
udelay(5);
}

//Start rIOMMU prefetching
REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
if (riommu_active) {
//Reflect the power status of DCHUBBUB
REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);

// Enable dynamic clock gating
REG_UPDATE_4(DCHVM_CLK_CTRL,
HVM_DISPCLK_R_GATE_DIS, 0,
HVM_DISPCLK_G_GATE_DIS, 0,
HVM_DCFCLK_R_GATE_DIS, 0,
HVM_DCFCLK_G_GATE_DIS, 0);
//Start rIOMMU prefetching
REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);

//Poll until HOSTVM_PREFETCH_DONE = 1
//TODO: Figure out interval us and retry count
REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
// Enable dynamic clock gating
REG_UPDATE_4(DCHVM_CLK_CTRL,
HVM_DISPCLK_R_GATE_DIS, 0,
HVM_DISPCLK_G_GATE_DIS, 0,
HVM_DCFCLK_R_GATE_DIS, 0,
HVM_DCFCLK_G_GATE_DIS, 0);

//Poll until HOSTVM_PREFETCH_DONE = 1
REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
}
}

static int hubbub21_init_dchub(struct hubbub *hubbub,
int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
struct dcn_vmid_page_table_config phys_config;

REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
FB_BASE, pa_config->system_aperture.fb_base);
FB_BASE, pa_config->system_aperture.fb_base >> 24);
REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
FB_TOP, pa_config->system_aperture.fb_top);
FB_TOP, pa_config->system_aperture.fb_top >> 24);
REG_SET(DCN_VM_FB_OFFSET, 0,
FB_OFFSET, pa_config->system_aperture.fb_offset);
FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
REG_SET(DCN_VM_AGP_BOT, 0,
AGP_BOT, pa_config->system_aperture.agp_bot);
AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
REG_SET(DCN_VM_AGP_TOP, 0,
AGP_TOP, pa_config->system_aperture.agp_top);
AGP_TOP, pa_config->system_aperture.agp_top >> 24);
REG_SET(DCN_VM_AGP_BASE, 0,
AGP_BASE, pa_config->system_aperture.agp_base);
AGP_BASE, pa_config->system_aperture.agp_base >> 24);

if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
phys_config.depth = 0;
phys_config.block_size = 0;
// Init VMID 0 based on PA config
dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
}

dcn21_dchvm_init(hubbub);

return NUM_VMID;
}

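The aperture writes above now shift the 64-bit physical addresses right by 24 bits before programming; the natural reading is that these DCN_VM_FB/AGP register fields hold addresses in 16 MB (1 << 24) units rather than bytes, mirroring the 4 KB (>> 12) units used for the GART page-table bounds. A minimal hedged sketch of the encoding (the helper name is illustrative, not part of the driver):

/* byte address -> 16 MB-granular field value, e.g. 0x0000_00F4_0000_0000 -> 0xF400 */
static inline uint32_t fb_aperture_field(uint64_t byte_addr)
{
	return (uint32_t)(byte_addr >> 24); /* drops the low 24 bits; callers pass 16 MB-aligned bounds */
}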
static void hubbub21_program_urgent_watermarks(
void hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@ -160,6 +181,13 @@ static void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
}
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
}

/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@ -192,6 +220,14 @@ static void hubbub21_program_urgent_watermarks(
DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
}

if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
}

/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
@ -223,6 +259,14 @@ static void hubbub21_program_urgent_watermarks(
DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
}

if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
}

/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
@ -253,9 +297,17 @@ static void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
}

if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
}
}

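Each REFCYC_PER_TRIP_TO_MEMORY watermark above is the urgent latency converted from nanoseconds into DCHUB refclk cycles and clamped to the 21-bit field (0x1fffff). A minimal sketch of what the file's convert_and_clamp() helper computes (the exact rounding behavior is an assumption based on its DCN10 counterpart):

static uint32_t convert_and_clamp(uint32_t wm_ns, uint32_t refclk_mhz, uint32_t clamp_value)
{
	/* ns * (cycles per us) / 1000 = cycles; e.g. 4000 ns at 400 MHz -> 1600 cycles */
	uint32_t ret_val = wm_ns * refclk_mhz / 1000;

	return ret_val > clamp_value ? clamp_value : ret_val;
}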
static void hubbub21_program_stutter_watermarks(
void hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@ -389,7 +441,7 @@ static void hubbub21_program_stutter_watermarks(
}
}

static void hubbub21_program_pstate_watermarks(
void hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@ -564,17 +616,26 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}

void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;

prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}

static const struct hubbub_funcs hubbub21_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub21_init_dchub,
.init_vm_ctx = NULL,
.init_vm_ctx = hubbub2_init_vm_ctx,
.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub21_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub21_program_watermarks,
.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};

void hubbub21_construct(struct dcn20_hubbub *hubbub,
@ -592,4 +653,5 @@ void hubbub21_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;

hubbub->debug_test_index_pstate = 0xB;
hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
|
@ -36,6 +36,10 @@
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\
SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DCHVM_CTRL0), \
SR(DCHVM_MEM_CTRL), \
@ -44,16 +48,9 @@
SR(DCHVM_RIOMMU_STAT0)

#define HUBBUB_REG_LIST_DCN21()\
HUBBUB_REG_LIST_DCN_COMMON(), \
HUBBUB_REG_LIST_DCN20_COMMON(), \
HUBBUB_SR_WATERMARK_REG_LIST(), \
HUBBUB_HVM_REG_LIST(), \
SR(DCHUBBUB_CRC_CTRL), \
SR(DCN_VM_FB_LOCATION_BASE),\
SR(DCN_VM_FB_LOCATION_TOP),\
SR(DCN_VM_FB_OFFSET),\
SR(DCN_VM_AGP_BOT),\
SR(DCN_VM_AGP_TOP),\
SR(DCN_VM_AGP_BASE)
HUBBUB_HVM_REG_LIST()

#define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \
HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \
@ -102,7 +99,7 @@
HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh)

#define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\
HUBBUB_MASK_SH_LIST_HVM(mask_sh),\
HUBBUB_MASK_SH_LIST_HVM(mask_sh), \
HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
@ -114,11 +111,28 @@
HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh)

void dcn21_dchvm_init(struct hubbub *hubbub);
int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
void hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
void hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
void hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
void hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);

void hubbub21_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
|
@ -22,6 +22,8 @@
* Authors: AMD
*
*/

#include "dcn10/dcn10_hubp.h"
#include "dcn21_hubp.h"

#include "dm_services.h"
@ -202,7 +204,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp2_program_surface_config,
.hubp_program_surface_config = hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp21_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
|
122
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
Normal file
@ -0,0 +1,122 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dce/dce_hwseq.h"
#include "dcn20/dcn20_hwseq.h"
#include "vmid.h"
#include "reg_helper.h"
#include "hw/clk_mgr.h"


#define DC_LOGGER_INIT(logger)

#define CTX \
hws->ctx
#define REG(reg)\
hws->regs->reg

#undef FN
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name

/* Temporary read settings, future will get values from kmd directly */
static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config,
struct dce_hwseq *hws)
{
uint32_t page_table_base_hi;
uint32_t page_table_base_lo;

REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi);
REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo);

config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo;

}

static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
struct dcn_hubbub_phys_addr_config config;

config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;

mmhub_update_page_table_config(&config, hws);

return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
}

// work around for Renoir s0i3, if register is programmed, bypass golden init.

static bool dcn21_s0i3_golden_init_wa(struct dc *dc)
{
struct dce_hwseq *hws = dc->hwseq;
uint32_t value = 0;

value = REG_READ(MICROSECOND_TIME_BASE_DIV);

return value != 0x00120464;
}

void dcn21_exit_optimized_pwr_state(
const struct dc *dc,
struct dc_state *context)
{
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
false);
}

void dcn21_optimize_pwr_state(
const struct dc *dc,
struct dc_state *context)
{
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
true);
}

void dcn21_hw_sequencer_construct(struct dc *dc)
{
dcn20_hw_sequencer_construct(dc);
dc->hwss.init_sys_ctx = dcn21_init_sys_ctx;
dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa;
dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state;
dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state;
}
|
33
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
Normal file
@ -0,0 +1,33 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

#ifndef __DC_HWSS_DCN21_H__
#define __DC_HWSS_DCN21_H__

struct dc;

void dcn21_hw_sequencer_construct(struct dc *dc);

#endif /* __DC_HWSS_DCN21_H__ */
|
470
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
Normal file
@ -0,0 +1,470 @@
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

#include "reg_helper.h"

#include <linux/delay.h>
#include "core_types.h"
#include "link_encoder.h"
#include "dcn21_link_encoder.h"
#include "stream_encoder.h"

#include "i2caux_interface.h"
#include "dc_bios_types.h"

#include "gpio_service_interface.h"

#define CTX \
enc10->base.ctx
#define DC_LOGGER \
enc10->base.ctx->logger

#define REG(reg)\
(enc10->link_regs->reg)

#undef FN
#define FN(reg_name, field_name) \
enc10->link_shift->field_name, enc10->link_mask->field_name

#define IND_REG(index) \
(enc10->link_regs->index)

static struct mpll_cfg dcn21_mpll_cfg_ref[] = {
// RBR
{
.hdmimode_enable = 0,
.ref_range = 1,
.ref_clk_mpllb_div = 1,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 238,
.mpllb_fracn_en = 0,
.mpllb_fracn_quot = 0,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 44237,
.mpllb_ssc_stepsize = 59454,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 2,
.tx_vboost_lvl = 5,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 2,
.mpllb_ana_cp_int = 9,
.mpllb_ana_cp_prop = 15,
.hdmi_pixel_clk_div = 0,
},
// HBR
{
.hdmimode_enable = 0,
.ref_range = 1,
.ref_clk_mpllb_div = 1,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 192,
.mpllb_fracn_en = 1,
.mpllb_fracn_quot = 32768,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 36864,
.mpllb_ssc_stepsize = 49545,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 1,
.tx_vboost_lvl = 5,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 3,
.mpllb_ana_cp_int = 9,
.mpllb_ana_cp_prop = 15,
.hdmi_pixel_clk_div = 0,
},
//HBR2
{
.hdmimode_enable = 0,
.ref_range = 1,
.ref_clk_mpllb_div = 1,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 192,
.mpllb_fracn_en = 1,
.mpllb_fracn_quot = 32768,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 36864,
.mpllb_ssc_stepsize = 49545,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 0,
.tx_vboost_lvl = 5,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 3,
.mpllb_ana_cp_int = 9,
.mpllb_ana_cp_prop = 15,
.hdmi_pixel_clk_div = 0,
},
//HBR3
{
.hdmimode_enable = 0,
.ref_range = 1,
.ref_clk_mpllb_div = 1,
.mpllb_ssc_en = 1,
.mpllb_div5_clk_en = 1,
.mpllb_multiplier = 304,
.mpllb_fracn_en = 1,
.mpllb_fracn_quot = 49152,
.mpllb_fracn_rem = 0,
.mpllb_fracn_den = 1,
.mpllb_ssc_up_spread = 0,
.mpllb_ssc_peak = 55296,
.mpllb_ssc_stepsize = 74318,
.mpllb_div_clk_en = 0,
.mpllb_div_multiplier = 0,
.mpllb_hdmi_div = 0,
.mpllb_tx_clk_div = 0,
.tx_vboost_lvl = 5,
.mpllb_pmix_en = 1,
.mpllb_word_div2_en = 0,
.mpllb_ana_v2i = 2,
.mpllb_ana_freq_vco = 1,
.mpllb_ana_cp_int = 7,
.mpllb_ana_cp_prop = 16,
.hdmi_pixel_clk_div = 0,
},
};


static bool update_cfg_data(
struct dcn10_link_encoder *enc10,
const struct dc_link_settings *link_settings,
struct dpcssys_phy_seq_cfg *cfg)
{
int i;

cfg->load_sram_fw = false;
cfg->use_calibration_setting = true;

//TODO: need to implement a proper lane mapping for Renoir.
for (i = 0; i < 4; i++)
cfg->lane_en[i] = true;

switch (link_settings->link_rate) {
case LINK_RATE_LOW:
cfg->mpll_cfg = dcn21_mpll_cfg_ref[0];
break;
case LINK_RATE_HIGH:
cfg->mpll_cfg = dcn21_mpll_cfg_ref[1];
break;
case LINK_RATE_HIGH2:
cfg->mpll_cfg = dcn21_mpll_cfg_ref[2];
break;
case LINK_RATE_HIGH3:
cfg->mpll_cfg = dcn21_mpll_cfg_ref[3];
break;
default:
DC_LOG_ERROR("%s: No supported link rate found %X!\n",
__func__, link_settings->link_rate);
return false;
}

return true;
}

void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t value;

REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);

if (!value && link_settings->lane_count > LANE_COUNT_TWO)
link_settings->lane_count = LANE_COUNT_TWO;
}

bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t value;

REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);

// if value == 1 alt mode is disabled, otherwise it is enabled
return !value;
}

bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
int value;

if (enc->features.flags.bits.DP_IS_USB_C) {
REG_GET(RDPCSTX_PHY_CNTL6,
RDPCS_PHY_DPALT_DISABLE, &value);

if (value == 1) {
ASSERT(0);
return false;
}
REG_UPDATE(RDPCSTX_PHY_CNTL6,
RDPCS_PHY_DPALT_DISABLE_ACK, 0);

udelay(40);

REG_GET(RDPCSTX_PHY_CNTL6,
RDPCS_PHY_DPALT_DISABLE, &value);
if (value == 1) {
ASSERT(0);
REG_UPDATE(RDPCSTX_PHY_CNTL6,
RDPCS_PHY_DPALT_DISABLE_ACK, 1);
return false;
}
}

REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1);

return true;
}


static void dcn21_link_encoder_release_phy(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);

if (enc->features.flags.bits.DP_IS_USB_C) {
REG_UPDATE(RDPCSTX_PHY_CNTL6,
RDPCS_PHY_DPALT_DISABLE_ACK, 1);
}

REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0);

}

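The two helpers above implement the USB-C DP alt-mode handshake: acquire clears RDPCS_PHY_DPALT_DISABLE_ACK, waits ~40 us, and re-checks RDPCS_PHY_DPALT_DISABLE before enabling the DP reference clock; release re-asserts the ACK and gates the clock again. A minimal sketch of how the enable/disable paths below pair them (illustrative only, not an extra call site in the driver):

if (dcn21_link_encoder_acquire_phy(enc)) {
	/* program and light up the DP output here */
	dcn21_link_encoder_disable_output(enc, SIGNAL_TYPE_DISPLAY_PORT); /* releases the PHY for DP signals */
}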
void dcn21_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10;
struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg;

if (!dcn21_link_encoder_acquire_phy(enc))
return;

if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
return;
}

if (!update_cfg_data(enc10, link_settings, cfg))
return;

enc1_configure_encoder(enc10, link_settings);

dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT);

}

void dcn21_link_encoder_enable_dp_mst_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
if (!dcn21_link_encoder_acquire_phy(enc))
return;

dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
}

void dcn21_link_encoder_disable_output(
struct link_encoder *enc,
enum signal_type signal)
{
dcn10_link_encoder_disable_output(enc, signal);

if (dc_is_dp_signal(signal))
dcn21_link_encoder_release_phy(enc);
}


static const struct link_encoder_funcs dcn21_link_enc_funcs = {
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.read_state = link_enc2_read_state,
#endif
.validate_output_with_stream =
dcn10_link_encoder_validate_output_with_stream,
.hw_init = enc2_hw_init,
.setup = dcn10_link_encoder_setup,
.enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
.enable_dp_output = dcn21_link_encoder_enable_dp_output,
.enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output,
.disable_output = dcn21_link_encoder_disable_output,
.dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
dcn10_link_encoder_update_mst_stream_allocation_table,
.psr_program_dp_dphy_fast_training =
dcn10_psr_program_dp_dphy_fast_training,
.psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
.connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
.enable_hpd = dcn10_link_encoder_enable_hpd,
.disable_hpd = dcn10_link_encoder_disable_hpd,
.is_dig_enabled = dcn10_is_dig_enabled,
.destroy = dcn10_link_encoder_destroy,
.fec_set_enable = enc2_fec_set_enable,
.fec_set_ready = enc2_fec_set_ready,
.fec_is_active = enc2_fec_is_active,
.get_dig_frontend = dcn10_get_dig_frontend,
.is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
};

void dcn21_link_encoder_construct(
struct dcn21_link_encoder *enc21,
const struct encoder_init_data *init_data,
const struct encoder_feature_support *enc_features,
const struct dcn10_link_enc_registers *link_regs,
const struct dcn10_link_enc_aux_registers *aux_regs,
const struct dcn10_link_enc_hpd_registers *hpd_regs,
const struct dcn10_link_enc_shift *link_shift,
const struct dcn10_link_enc_mask *link_mask)
{
struct bp_encoder_cap_info bp_cap_info = {0};
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
enum bp_result result = BP_RESULT_OK;
struct dcn10_link_encoder *enc10 = &enc21->enc10;

enc10->base.funcs = &dcn21_link_enc_funcs;
enc10->base.ctx = init_data->ctx;
enc10->base.id = init_data->encoder;

enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;

enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;

enc10->base.features = *enc_features;

enc10->base.transmitter = init_data->transmitter;

/* set the flag to indicate whether driver poll the I2C data pin
* while doing the DP sink detect
*/

/* if (dal_adapter_service_is_feature_supported(as,
FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
enc10->base.features.flags.bits.
DP_SINK_DETECT_POLL_DATA_PIN = true;*/

enc10->base.output_signals =
SIGNAL_TYPE_DVI_SINGLE_LINK |
SIGNAL_TYPE_DVI_DUAL_LINK |
SIGNAL_TYPE_LVDS |
SIGNAL_TYPE_DISPLAY_PORT |
SIGNAL_TYPE_DISPLAY_PORT_MST |
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;

/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
* DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
* Prefer DIG assignment is decided by board design.
* For DCE 8.0, there are only max 6 UNIPHYs, we assume board design
* and VBIOS will filter out 7 UNIPHY for DCE 8.0.
* By this, adding DIGG should not hurt DCE 8.0.
* This will let DCE 8.1 share DCE 8.0 as much as possible
*/

enc10->link_regs = link_regs;
enc10->aux_regs = aux_regs;
enc10->hpd_regs = hpd_regs;
enc10->link_shift = link_shift;
enc10->link_mask = link_mask;

switch (enc10->base.transmitter) {
case TRANSMITTER_UNIPHY_A:
enc10->base.preferred_engine = ENGINE_ID_DIGA;
break;
case TRANSMITTER_UNIPHY_B:
enc10->base.preferred_engine = ENGINE_ID_DIGB;
break;
case TRANSMITTER_UNIPHY_C:
enc10->base.preferred_engine = ENGINE_ID_DIGC;
break;
case TRANSMITTER_UNIPHY_D:
enc10->base.preferred_engine = ENGINE_ID_DIGD;
break;
case TRANSMITTER_UNIPHY_E:
enc10->base.preferred_engine = ENGINE_ID_DIGE;
break;
case TRANSMITTER_UNIPHY_F:
enc10->base.preferred_engine = ENGINE_ID_DIGF;
break;
case TRANSMITTER_UNIPHY_G:
enc10->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
ASSERT_CRITICAL(false);
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
}

/* default to one to mirror Windows behavior */
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;

result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
enc10->base.id, &bp_cap_info);

/* Override features with DCE-specific values */
if (result == BP_RESULT_OK) {
enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
}
if (enc10->base.ctx->dc->debug.hdmi20_disable) {
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
}
|
61
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
Normal file
@ -0,0 +1,61 @@
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

#ifndef __DC_LINK_ENCODER__DCN21_H__
#define __DC_LINK_ENCODER__DCN21_H__

#include "dcn20/dcn20_link_encoder.h"

struct dcn21_link_encoder {
struct dcn10_link_encoder enc10;
struct dpcssys_phy_seq_cfg phy_seq_cfg;
};

#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\
LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh), \
SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH)

void dcn21_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);

void dcn21_link_encoder_construct(
struct dcn21_link_encoder *enc21,
const struct encoder_init_data *init_data,
const struct encoder_feature_support *enc_features,
const struct dcn10_link_enc_registers *link_regs,
const struct dcn10_link_enc_aux_registers *aux_regs,
const struct dcn10_link_enc_hpd_registers *hpd_regs,
const struct dcn10_link_enc_shift *link_shift,
const struct dcn10_link_enc_mask *link_mask);

#endif
|
@ -23,8 +23,6 @@
*
*/

#include <linux/slab.h>

#include "dm_services.h"
#include "dc.h"

@ -42,11 +40,11 @@
#include "irq/dcn21/irq_service_dcn21.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn20/dcn20_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn20/dcn20_opp.h"
#include "dcn20/dcn20_dsc.h"
#include "dcn20/dcn20_link_encoder.h"
#include "dcn21/dcn21_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
@ -84,6 +82,7 @@


struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.odm_capable = 1,
.gpuvm_enable = 0,
.hostvm_enable = 0,
.gpuvm_max_page_table_levels = 1,
@ -205,11 +204,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.state = 4,
.dcfclk_mhz = 810.0,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1015.0,
.dppclk_mhz = 1015.0,
.phyclk_mhz = 810.0,
.dispclk_mhz = 1395.0,
.dppclk_mhz = 1285.0,
.phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 318.334,
.dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
/*Extra state, no dispclk ramping*/
@ -217,18 +216,18 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.state = 5,
.dcfclk_mhz = 810.0,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1015.0,
.dppclk_mhz = 1015.0,
.phyclk_mhz = 810.0,
.dispclk_mhz = 1395.0,
.dppclk_mhz = 1285.0,
.phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 318.334,
.dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},

},

.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 17.0,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@ -350,6 +349,30 @@ static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_6)
};

static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCN10_REG_LIST()
};

static const struct dce_dmcu_shift dmcu_shift = {
DMCU_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_dmcu_mask dmcu_mask = {
DMCU_MASK_SH_LIST_DCN10(_MASK)
};

static const struct dce_abm_registers abm_regs = {
ABM_DCN20_REG_LIST()
};

static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN20(_MASK)
};

#ifdef CONFIG_DRM_AMD_DC_DMUB
static const struct dcn21_dmcub_registers dmcub_regs = {
DMCUB_REG_LIST_DCN()
@ -628,6 +651,14 @@ static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(4),
};

static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};

static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
};
@ -636,6 +667,8 @@ static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
};

static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);

static struct input_pixel_processor *dcn21_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@ -683,7 +716,10 @@ static struct dce_aux *dcn21_aux_engine_create(

dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst]);
&aux_engine_regs[inst],
&aux_mask,
&aux_shift,
ctx->dc->caps.extended_aux_timeout_support);

return &aux_engine->base;
}
@ -726,11 +762,12 @@ static const struct resource_caps res_cap_rn = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
.num_audio = 6, // 6 audio endpoints. 4 audio streams
.num_audio = 4, // 4 audio endpoints. 4 audio streams
.num_stream_encoder = 5,
.num_pll = 5, // maybe 3 because the last two used for USB-c
.num_dwb = 1,
.num_ddc = 5,
.num_vmid = 1,
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 3,
#endif
@ -800,11 +837,11 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
.max_downscale_src_width = 5120,/*upto 5K*/
.max_downscale_src_width = 3840,
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = true,
.disable_48mhz_pwrdwn = true,
.disable_48mhz_pwrdwn = false,
};

static const struct dc_debug_options debug_defaults_diags = {
@ -939,7 +976,7 @@ static void destruct(struct dcn21_resource_pool *pool)
dcn_dccg_destroy(&pool->base.dccg);

if (pool->base.pp_smu != NULL)
dcn20_pp_smu_destroy(&pool->base.pp_smu);
dcn21_pp_smu_destroy(&pool->base.pp_smu);
}


@ -974,6 +1011,29 @@ static void calculate_wm_set_for_vlevel(

}

static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
kernel_fpu_begin();
if (dc->bb_overrides.sr_exit_time_ns) {
bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
}

if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
bb->sr_enter_plus_exit_time_us =
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
}

if (dc->bb_overrides.urgent_latency_ns) {
bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}

if (dc->bb_overrides.dram_clock_change_latency_ns) {
bb->dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
kernel_fpu_end();
}

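patch_bounding_box() applies the debug bb_overrides (given in nanoseconds) to the DML soc bounding box as microseconds, and because those fields are doubles it brackets the math with kernel_fpu_begin()/kernel_fpu_end(); in-kernel x86 code may only touch FPU state inside such a region. A minimal sketch of the same pattern (illustrative, not an additional driver function):

kernel_fpu_begin();
/* ns -> us needs floating point: 9000 ns / 1000.0 = 9.0 us */
bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
kernel_fpu_end();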
void dcn21_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@ -988,6 +1048,8 @@ void dcn21_calculate_wm(

ASSERT(bw_params);

patch_bounding_box(dc, &context->bw_ctx.dml.soc);

for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
@ -1278,7 +1340,6 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels;
dcn2_1_soc.num_states = 0;

for (i = 0; i < clk_table->num_entries; i++) {

@ -1288,8 +1349,9 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
/* This is probably wrong, TODO: find correct calculation */
dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
dcn2_1_soc.num_states++;
}
dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
dcn2_1_soc.num_states = i;
}

/* Temporary Place holder until we can get them from fuse */
@ -1317,32 +1379,42 @@ static struct dpm_clocks dummy_clocks = {

};

enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
static enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
return PP_SMU_RESULT_OK;
}

enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
static enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
struct dpm_clocks *clock_table)
{
*clock_table = dummy_clocks;
return PP_SMU_RESULT_OK;
}

struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);

pp_smu->ctx.ver = PP_SMU_VER_RN;
if (!pp_smu)
return pp_smu;

pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment) || IS_DIAG_DC(ctx->dce_environment)) {
pp_smu->ctx.ver = PP_SMU_VER_RN;
pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
} else {

dm_pp_get_funcs(ctx, pp_smu);

if (pp_smu->ctx.ver != PP_SMU_VER_RN)
pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
}

return pp_smu;
}

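dcn21_pp_smu_create() now NULL-checks the allocation and only installs the dummy Renoir callbacks on FPGA/diagnostic environments; on production it asks DM for the real funcs and zeroes the struct if the reported version is not PP_SMU_VER_RN. A hedged sketch of a caller consuming the result (the ranges variable and the call site itself are assumptions for illustration):

struct pp_smu_funcs *pp_smu = dcn21_pp_smu_create(ctx);

if (pp_smu && pp_smu->ctx.ver == PP_SMU_VER_RN)
	pp_smu->rn_funcs.set_wm_ranges(&pp_smu->ctx, &ranges); /* hypothetical use */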
void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@ -1400,6 +1472,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN21 = true;
}
return hws;
}
@ -1418,9 +1491,103 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn21_hwseq_create,
};

static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
.flags.bits.IS_TPS4_CAPABLE = true
};


#define link_regs(id, phyid)\
[id] = {\
LE_DCN10_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}

static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(0, A),
link_regs(1, B),
link_regs(2, C),
link_regs(3, D),
link_regs(4, E),
};

#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
}

static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(0),
aux_regs(1),
aux_regs(2),
aux_regs(3),
aux_regs(4)
};

#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
}

static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
hpd_regs(0),
hpd_regs(1),
hpd_regs(2),
hpd_regs(3),
hpd_regs(4)
};

static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
};

static struct link_encoder *dcn21_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn21_link_encoder *enc21 =
kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);

if (!enc21)
return NULL;

dcn21_link_encoder_construct(enc21,
enc_init_data,
&link_enc_feature,
&link_enc_regs[enc_init_data->transmitter],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
&le_mask);

return &enc21->enc10.base;
}
#define CTX ctx

#define REG(reg_name) \
(DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)

static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
uint32_t value = REG_READ(CC_DC_PIPE_DIS);
/* RV1 support max 4 pipes */
value = value & 0xf;
return value;
}

static struct resource_funcs dcn21_res_pool_funcs = {
|
||||
.destroy = dcn21_destroy_resource_pool,
|
||||
.link_enc_create = dcn20_link_encoder_create,
|
||||
.link_enc_create = dcn21_link_encoder_create,
|
||||
.validate_bandwidth = dcn21_validate_bandwidth,
|
||||
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
|
||||
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
|
||||
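The link_regs/aux_regs/hpd_regs macros above rely on C99 designated array initializers ([id] = { ... }) so each encoder instance's register set lands at its own index, letting dcn21_link_encoder_create index the tables directly by transmitter, channel, and hpd_source. A compilable toy version of the same macro pattern — the register names and offsets here are invented, not DCN registers:

	#include <stdio.h>

	struct regs { unsigned int ctl; unsigned int status; };

	/* Toy equivalent of link_regs(id, phyid): index the array by id and
	 * derive per-instance register offsets from it. */
	#define TOY_REG_LIST(id) .ctl = 0x100 + 0x10 * (id), .status = 0x104 + 0x10 * (id)
	#define toy_regs(id) [id] = { TOY_REG_LIST(id) }

	static const struct regs enc_regs[] = {
		toy_regs(0),
		toy_regs(1),
		toy_regs(2),
	};

	int main(void)
	{
		printf("enc1 ctl at 0x%x\n", enc_regs[1].ctl);
		return 0;
	}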
@@ -1437,9 +1604,10 @@ static bool construct(
 	struct dc *dc,
 	struct dcn21_resource_pool *pool)
 {
-	int i;
+	int i, j;
 	struct dc_context *ctx = dc->ctx;
 	struct irq_service_init_data init_data;
+	uint32_t pipe_fuses = read_pipe_fuses(ctx);

 	ctx->dc_bios->regs = &bios_regs;
@@ -1457,7 +1625,9 @@ static bool construct(
 	*************************************************/
 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;

-	pool->base.pipe_count = 4;
+	/* max pipe num for ASIC before check pipe fuses */
+	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;

 	dc->caps.max_downscale_ratio = 200;
 	dc->caps.i2c_speed_in_khz = 100;
 	dc->caps.max_cursor_size = 256;
|
||||
dc->caps.max_slave_planes = 1;
|
||||
dc->caps.post_blend_color_processing = true;
|
||||
dc->caps.force_dp_tps4_for_cp2520 = true;
|
||||
dc->caps.extended_aux_timeout_support = true;
|
||||
|
||||
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
|
||||
dc->debug = debug_defaults_drv;
|
||||
@ -1516,6 +1687,26 @@ static bool construct(
|
||||
goto create_fail;
|
||||
}
|
||||
|
||||
pool->base.dmcu = dcn20_dmcu_create(ctx,
|
||||
&dmcu_regs,
|
||||
&dmcu_shift,
|
||||
&dmcu_mask);
|
||||
if (pool->base.dmcu == NULL) {
|
||||
dm_error("DC: failed to create dmcu!\n");
|
||||
BREAK_TO_DEBUGGER();
|
||||
goto create_fail;
|
||||
}
|
||||
|
||||
pool->base.abm = dce_abm_create(ctx,
|
||||
&abm_regs,
|
||||
&abm_shift,
|
||||
&abm_mask);
|
||||
if (pool->base.abm == NULL) {
|
||||
dm_error("DC: failed to create abm!\n");
|
||||
BREAK_TO_DEBUGGER();
|
||||
goto create_fail;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DRM_AMD_DC_DMUB
|
||||
pool->base.dmcub = dcn21_dmcub_create(ctx,
|
||||
&dmcub_regs,
|
||||
@@ -1537,8 +1728,15 @@ static bool construct(
 	if (!pool->base.irqs)
 		goto create_fail;

+	j = 0;
 	/* mem input -> ipp -> dpp -> opp -> TG */
 	for (i = 0; i < pool->base.pipe_count; i++) {
+		/* if pipe is disabled, skip instance of HW pipe,
+		 * i.e, skip ASIC register instance
+		 */
+		if ((pipe_fuses & (1 << i)) != 0)
+			continue;
+
 		pool->base.hubps[i] = dcn21_hubp_create(ctx, i);
 		if (pool->base.hubps[i] == NULL) {
 			BREAK_TO_DEBUGGER();
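read_pipe_fuses (added earlier in this diff) returns a mask in which a set bit marks a harvested pipe, so the loop skips that hardware instance entirely instead of renumbering it, and j counts only the pipes actually constructed. A standalone sketch of the same skip-by-fuse walk, with an example mask:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pipe_fuses = 0x4; /* example: pipe 2 fused off */
		int max_pipes = 4, i, j = 0;

		for (i = 0; i < max_pipes; i++) {
			if ((pipe_fuses & (1u << i)) != 0)
				continue; /* disabled instance: skip its register set */
			printf("create pipe hw instance %d\n", i);
			j++;      /* logical count of usable pipes */
		}
		printf("%d of %d pipes usable\n", j, max_pipes);
		return 0;
	}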
@@ -1562,6 +1760,23 @@ static bool construct(
 				"DC: failed to create dpps!\n");
 			goto create_fail;
 		}
+
+		pool->base.opps[i] = dcn21_opp_create(ctx, i);
+		if (pool->base.opps[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create output pixel processor!\n");
+			goto create_fail;
+		}
+
+		pool->base.timing_generators[i] = dcn21_timing_generator_create(
+				ctx, i);
+		if (pool->base.timing_generators[i] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error("DC: failed to create tg!\n");
+			goto create_fail;
+		}
+		j++;
 	}

 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
@@ -1582,27 +1797,9 @@ static bool construct(
 		pool->base.sw_i2cs[i] = NULL;
 	}

-	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
-		pool->base.opps[i] = dcn21_opp_create(ctx, i);
-		if (pool->base.opps[i] == NULL) {
-			BREAK_TO_DEBUGGER();
-			dm_error(
-				"DC: failed to create output pixel processor!\n");
-			goto create_fail;
-		}
-	}
-
-	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
-		pool->base.timing_generators[i] = dcn21_timing_generator_create(
-			ctx, i);
-		if (pool->base.timing_generators[i] == NULL) {
-			BREAK_TO_DEBUGGER();
-			dm_error("DC: failed to create tg!\n");
-			goto create_fail;
-		}
-	}
-
-	pool->base.timing_generator_count = i;
+	pool->base.timing_generator_count = j;
+	pool->base.pipe_count = j;
+	pool->base.mpcc_count = j;

 	pool->base.mpc = dcn21_mpc_create(ctx);
 	if (pool->base.mpc == NULL) {
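With the OPP/TG creation folded into the fused-pipe loop, the standalone loops above become dead and the counts switch from i (highest HW instance visited) to j (pipes actually created). The resource arrays stay indexed by HW instance, so they may contain holes; a sketch of the invariant consumers must respect:

	#include <stdio.h>

	/* Sketch: resources stay indexed by HW instance, so the array can have
	 * holes where pipes are fused off; counts reflect only created entries. */
	int main(void)
	{
		const char *tg[4] = { "tg0", "tg1", NULL, "tg3" }; /* instance 2 fused */
		int i, created = 0;

		for (i = 0; i < 4; i++) {
			if (!tg[i])
				continue; /* hole: skip, do not stop at the first NULL */
			created++;
		}
		printf("timing_generator_count = %d\n", created); /* 3, not 4 */
		return 0;
	}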
@@ -1645,7 +1842,7 @@ static bool construct(
 		&res_create_funcs : &res_create_maximus_funcs)))
 		goto create_fail;

-	dcn20_hw_sequencer_construct(dc);
+	dcn21_hw_sequencer_construct(dc);

 	dc->caps.max_planes = pool->base.pipe_count;

@@ -249,12 +249,10 @@ struct pp_smu_funcs_nv {
 };
 #endif

-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
-
 #define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8
-#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 4
-#define PP_SMU_NUM_FCLK_DPM_LEVELS 4
-#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
+#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8
+#define PP_SMU_NUM_FCLK_DPM_LEVELS 8
+#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 8

 struct dpm_clock {
 	uint32_t Freq; // In MHz
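These PP_SMU_NUM_*_DPM_LEVELS constants size the fixed per-domain level arrays that the SMU fills in, so raising DCFCLK/FCLK/MEMCLK from 4 to 8 levels enlarges that table, presumably to match the renoir firmware's 8-level DPM tables. An illustrative sketch of such a fixed-size layout — field names here are invented, the real layout lives in pp_smu.h:

	#include <stdio.h>
	#include <stdint.h>

	#define NUM_DCFCLK_LEVELS 8
	#define NUM_FCLK_LEVELS   8

	struct level { uint32_t freq_mhz; uint32_t vol_mv; };

	struct dpm_table {
		struct level dcfclk[NUM_DCFCLK_LEVELS]; /* sized by the #defines above */
		struct level fclk[NUM_FCLK_LEVELS];     /* producer and consumer must agree */
	};

	int main(void)
	{
		printf("table is %zu bytes\n", sizeof(struct dpm_table));
		return 0;
	}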
@@ -288,7 +286,6 @@ struct pp_smu_funcs_rn {
 	enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp,
 			struct dpm_clocks *clock_table);
 };
-#endif

 struct pp_smu_funcs {
 	struct pp_smu ctx;
@@ -65,6 +65,7 @@ typedef struct {

 #define BPP_INVALID 0
 #define BPP_BLENDED_PIPE 0xffffffff
+#define DCN21_MAX_DSC_IMAGE_WIDTH 5184

 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
 static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -3379,6 +3380,8 @@ static unsigned int TruncToValidBPP(
 		return 30;
 	else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
 		return 24;
+	else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
+		return 18;
 	else
 		return BPP_INVALID;
 }
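TruncToValidBPP rounds a computed bits-per-pixel budget down to the nearest supported step, honoring an exact DesiredBPP when one is requested; this hunk extends the ladder with an 18 bpp step. A compilable simplification using only the thresholds visible in this hunk (the full function has more branches):

	#include <stdio.h>

	#define BPP_INVALID 0

	/* Simplified ladder: pick the highest valid bpp not exceeding
	 * decimal_bpp, honoring an exact desired_bpp if given. */
	static unsigned int trunc_to_valid_bpp(double decimal_bpp, unsigned int desired_bpp)
	{
		if (decimal_bpp >= 30 && (desired_bpp == 0 || desired_bpp == 30))
			return 30;
		else if (decimal_bpp >= 24 && (desired_bpp == 0 || desired_bpp == 24))
			return 24;
		else if (decimal_bpp >= 18 && (desired_bpp == 0 || desired_bpp == 18))
			return 18;
		else
			return BPP_INVALID;
	}

	int main(void)
	{
		printf("%u\n", trunc_to_valid_bpp(20.25, 0));  /* 18 */
		printf("%u\n", trunc_to_valid_bpp(20.25, 24)); /* 0: 24 does not fit */
		return 0;
	}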
@@ -3936,6 +3939,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				mode_lib->vba.MaximumSwathWidthInLineBuffer);
 	}
 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+		double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+			mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+			mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
 		for (j = 0; j < 2; j++) {
 			mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
 				mode_lib->vba.MaxDispclk[i],
@@ -3965,7 +3972,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 				&& i == mode_lib->vba.soc.num_states)
 			mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
 				* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
-		if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+		if (mode_lib->vba.ODMCapability == false ||
+				(locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
+					&& (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) {
 			locals->ODMCombineEnablePerState[i][k] = false;
 			mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
 		} else {
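The rewritten condition decides when a plane may stay on a single pipe: ODM combine remains off only if the required DISPCLK fits under the rounded-down maximum and, when DSC is enabled, the active width does not exceed DCN21_MAX_DSC_IMAGE_WIDTH (5184); wider DSC images must be split across pipes. A standalone predicate capturing that shape (names and units mine, not DML's):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_DSC_IMAGE_WIDTH 5184

	/* Sketch of the single-pipe test: clock headroom plus DSC width limit. */
	static bool single_pipe_ok(bool odm_capable, double req_dispclk_mhz,
				   double max_dispclk_mhz, bool dsc_enabled, int h_active)
	{
		if (!odm_capable)
			return true; /* no ODM available: run single-pipe regardless */
		return req_dispclk_mhz <= max_dispclk_mhz &&
		       (!dsc_enabled || h_active <= MAX_DSC_IMAGE_WIDTH);
	}

	int main(void)
	{
		/* A 4K-wide DSC stream fits; an 8K-wide one forces ODM combine. */
		printf("%d\n", single_pipe_ok(true, 600.0, 1200.0, true, 3840)); /* 1 */
		printf("%d\n", single_pipe_ok(true, 600.0, 1200.0, true, 7680)); /* 0 */
		return 0;
	}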
@@ -269,7 +269,7 @@ struct writeback_st {

 struct _vcs_dpi_display_output_params_st {
 	int dp_lanes;
-	int output_bpp;
+	double output_bpp;
 	int dsc_enable;
 	int wb_enable;
 	int num_active_wb;
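output_bpp becomes a double because DSC targets can be fractional (the spec allows 1/16-bpp steps, so values like 12.5 bpp are legitimate); an int field silently truncated such targets before they reached the mode-support math. A tiny illustration of the truncation the type change removes:

	#include <stdio.h>

	int main(void)
	{
		double requested_bpp = 12.5;   /* fractional DSC target */
		int as_int = requested_bpp;    /* old field type: truncates to 12 */
		double as_double = requested_bpp;

		printf("int field: %d, double field: %.1f\n", as_int, as_double);
		return 0;
	}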
@@ -434,6 +434,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
 			dst->odm_combine;
 	mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
 			(enum output_format_class) (dout->output_format);
+	mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
+			dout->output_bpp;
 	mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
 			(enum output_encoder_class) (dout->output_type);

Some files were not shown because too many files have changed in this diff.