Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2025-01-20 18:59:13 +07:00
drm fixes for 5.4-rc7

Merge tag 'drm-fixes-2019-11-08' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Weekly fixes for drm: amdgpu has a few but they are pretty scattered
  fixes, the fbdev one is a build regression fix that we didn't want to
  risk leaving out, otherwise a couple of i915, one radeon and a core
  atomic fix.

  core:
   - add missing documentation for GEM shmem madvise helpers
   - fix for a state dereference in atomic self-refresh helpers

  fbdev:
   - one compilation fix for c2p fbdev helpers

  amdgpu:
   - fix navi14 display issue root cause and revert workaround
   - GPU reset scheduler interaction fix
   - fix fan boost on multi-GPU
   - gfx10 and sdma5 fixes for navi
   - GFXOFF fix for renoir
   - add navi14 PCI ID
   - GPUVM fix for arcturus

  radeon:
   - port an SI power fix from amdgpu

  i915:
   - fix HPD poll to avoid a kworker consuming a lot of CPU cycles
   - do not use TBT type for non Type-C ports"

* tag 'drm-fixes-2019-11-08' of git://anongit.freedesktop.org/drm/drm:
  drm/radeon: fix si_enable_smc_cac() failed issue
  drm/amdgpu/renoir: move gfxoff handling into gfx9 module
  drm/amdgpu: add warning for GRBM 1-cycle delay issue in gfx9
  drm/amdgpu: add dummy read by engines for some GCVM status registers in gfx10
  drm/amdgpu: register gpu instance before fan boost feature enablment
  drm/amd/swSMU: fix smu workload bit map error
  drm/shmem: Add docbook comments for drm_gem_shmem_object madvise fields
  drm/amdgpu: add navi14 PCI ID
  Revert "drm/amd/display: setting the DIG_MODE to the correct value."
  drm/amd/display: Add ENGINE_ID_DIGD condition check for Navi14
  drm/amdgpu: dont schedule jobs while in reset
  drm/amdgpu/arcturus: properly set BANK_SELECT and FRAGMENT_SIZE
  drm/atomic: fix self-refresh helpers crtc state dereference
  drm/i915/dp: Do not switch aux to TBT mode for non-TC ports
  drm/i915: Avoid HPD poll detect triggering a new detect cycle
  fbdev: c2p: Fix link failure on non-inlining
commit efc61f7cbc
@@ -604,8 +604,11 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 			continue;
 		}
 
-		for (i = 0; i < num_entities; i++)
+		for (i = 0; i < num_entities; i++) {
+			mutex_lock(&ctx->adev->lock_reset);
 			drm_sched_entity_fini(&ctx->entities[0][i].entity);
+			mutex_unlock(&ctx->adev->lock_reset);
+		}
 	}
 }
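The hunk above (the "dont schedule jobs while in reset" fix) wraps each drm_sched_entity_fini() call in the device's lock_reset mutex, so context teardown and GPU reset exclude each other. A minimal userspace sketch of that locking pattern; every name here (fake_device, fake_entity_fini, ...) is invented for illustration:

/* Sketch only: teardown of per-context entities is serialized against a
 * reset path by taking the same mutex the reset handler would hold. */
#include <pthread.h>
#include <stdio.h>

struct fake_device {
	pthread_mutex_t lock_reset;   /* held for the whole "GPU reset" */
};

struct fake_entity {
	int id;
};

static void fake_entity_fini(struct fake_entity *e)
{
	printf("finishing entity %d\n", e->id);
}

static void ctx_entities_fini(struct fake_device *dev,
			      struct fake_entity *entities, int n)
{
	for (int i = 0; i < n; i++) {
		/* Mirrors the patch: never tear an entity down while a
		 * reset is in flight; the mutex makes the two exclusive. */
		pthread_mutex_lock(&dev->lock_reset);
		fake_entity_fini(&entities[i]);
		pthread_mutex_unlock(&dev->lock_reset);
	}
}

int main(void)
{
	struct fake_device dev = { .lock_reset = PTHREAD_MUTEX_INITIALIZER };
	struct fake_entity ents[3] = { {0}, {1}, {2} };

	ctx_entities_fini(&dev, ents, 3);
	return 0;
}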
@@ -2885,6 +2885,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
 	}
 
+	/*
+	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
+	 * Otherwise the mgpu fan boost feature will be skipped due to the
+	 * gpu instance is counted less.
+	 */
+	amdgpu_register_gpu_instance(adev);
+
 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
 	 * explicit gating rather than handling it automatically.
 	 */
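This hunk and the amdgpu_driver_load_kms hunk below move amdgpu_register_gpu_instance() ahead of the fan boost enablement, which counts registered instances. A hypothetical sketch of that ordering dependency, with invented names:

/* Sketch only: a feature that counts registered GPU instances must run
 * after every instance has been registered. */
#include <stdio.h>

static int num_instances;

static void register_gpu_instance(void) { num_instances++; }

static void enable_mgpu_fan_boost(void)
{
	/* The feature only makes sense with more than one GPU; called too
	 * early it sees a count that is too low and bails out. */
	if (num_instances < 2) {
		printf("fan boost skipped (%d instance(s) seen)\n",
		       num_instances);
		return;
	}
	printf("fan boost enabled for %d GPUs\n", num_instances);
}

int main(void)
{
	/* Wrong order: enable first, register later -> feature skipped. */
	enable_mgpu_fan_boost();

	/* Fixed order, as in the patch: register, then enable. */
	register_gpu_instance();
	register_gpu_instance();
	enable_mgpu_fan_boost();
	return 0;
}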
@@ -1016,6 +1016,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
 	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
 	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
 
 	/* Renoir */
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
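One navi14 device ID is added to the PCI ID table; by the hunk's line count the new entry appears to be 0x7347, so it is marked as the addition above. A simplified userspace model of how such a table is consumed (the real kernel uses struct pci_device_id and its matching helpers):

#include <stdint.h>
#include <stdio.h>

struct id_entry {
	uint16_t vendor, device;
	unsigned long driver_data;   /* chip family + flags in the patch */
};

#define CHIP_NAVI14 0x14

static const struct id_entry ids[] = {
	{ 0x1002, 0x7340, CHIP_NAVI14 },
	{ 0x1002, 0x7341, CHIP_NAVI14 },
	{ 0x1002, 0x7347, CHIP_NAVI14 },   /* the newly added ID */
	{ 0x1002, 0x734F, CHIP_NAVI14 },
	{ 0, 0, 0 },                       /* terminator */
};

static const struct id_entry *match(uint16_t ven, uint16_t dev)
{
	/* Probe walks the table until vendor/device match. */
	for (const struct id_entry *e = ids; e->vendor; e++)
		if (e->vendor == ven && e->device == dev)
			return e;
	return NULL;
}

int main(void)
{
	/* Before the patch, 0x7347 fell through and the driver never bound. */
	printf("0x7347 matched: %s\n", match(0x1002, 0x7347) ? "yes" : "no");
	return 0;
}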
@@ -289,6 +289,7 @@ struct amdgpu_gfx {
 	uint32_t			mec2_feature_version;
 	bool				mec_fw_write_wait;
 	bool				me_fw_write_wait;
+	bool				cp_fw_write_wait;
 	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
 	unsigned			num_gfx_rings;
 	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -190,7 +190,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 		pm_runtime_put_autosuspend(dev->dev);
 	}
 
-	amdgpu_register_gpu_instance(adev);
 out:
 	if (r) {
 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -564,6 +564,32 @@ static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
 	kfree(adev->gfx.rlc.register_list_format);
 }
 
+static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
+{
+	adev->gfx.cp_fw_write_wait = false;
+
+	switch (adev->asic_type) {
+	case CHIP_NAVI10:
+	case CHIP_NAVI12:
+	case CHIP_NAVI14:
+		if ((adev->gfx.me_fw_version >= 0x00000046) &&
+		    (adev->gfx.me_feature_version >= 27) &&
+		    (adev->gfx.pfp_fw_version >= 0x00000068) &&
+		    (adev->gfx.pfp_feature_version >= 27) &&
+		    (adev->gfx.mec_fw_version >= 0x0000005b) &&
+		    (adev->gfx.mec_feature_version >= 27))
+			adev->gfx.cp_fw_write_wait = true;
+		break;
+	default:
+		break;
+	}
+
+	if (adev->gfx.cp_fw_write_wait == false)
+		DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
+			      GRBM requires 1-cycle delay in cp firmware\n");
+}
+
 static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
 {
 	const struct rlc_firmware_header_v2_1 *rlc_hdr;
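gfx_v10_0_check_fw_write_wait() above sets a capability flag only when every CP firmware component meets a minimum version, and warns once otherwise. A standalone sketch of the same gate; the thresholds are copied from the hunk, everything else is invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fw_versions {
	uint32_t me_fw, me_feat, pfp_fw, pfp_feat, mec_fw, mec_feat;
};

/* True only when every component meets its minimum (values from the hunk). */
static bool cp_supports_write_wait(const struct fw_versions *v)
{
	return v->me_fw  >= 0x46 && v->me_feat  >= 27 &&
	       v->pfp_fw >= 0x68 && v->pfp_feat >= 27 &&
	       v->mec_fw >= 0x5b && v->mec_feat >= 27;
}

int main(void)
{
	struct fw_versions old = { 0x40, 20, 0x60, 20, 0x50, 20 };
	struct fw_versions new = { 0x46, 27, 0x68, 27, 0x5b, 27 };

	if (!cp_supports_write_wait(&old))
		fprintf(stderr, "warning: cp firmware too old for the 1-cycle delay fix\n");
	printf("new fw ok: %d\n", cp_supports_write_wait(&new));
	return 0;
}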
@@ -832,6 +858,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 		}
 	}
 
+	gfx_v10_0_check_fw_write_wait(adev);
 out:
 	if (err) {
 		dev_err(adev->dev,
@@ -4765,6 +4792,24 @@ static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 	gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
 }
 
+static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+						   uint32_t reg0, uint32_t reg1,
+						   uint32_t ref, uint32_t mask)
+{
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+	struct amdgpu_device *adev = ring->adev;
+	bool fw_version_ok = false;
+
+	fw_version_ok = adev->gfx.cp_fw_write_wait;
+
+	if (fw_version_ok)
+		gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
+				       ref, mask, 0x20);
+	else
+		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+							   ref, mask);
+}
+
 static void
 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 				      uint32_t me, uint32_t pipe,
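The new ring callback above emits a fused write+wait packet when the firmware capability flag is set, and otherwise falls back to the generic two-step helper. A hypothetical sketch of that dispatch, with invented emit functions:

#include <stdbool.h>
#include <stdio.h>

static void emit_fused_write_wait(unsigned reg0, unsigned reg1,
				  unsigned ref, unsigned mask)
{
	printf("one packet: write %#x=%#x, wait %#x mask %#x\n",
	       reg0, ref, reg1, mask);
}

static void emit_generic_write_then_wait(unsigned reg0, unsigned reg1,
					 unsigned ref, unsigned mask)
{
	printf("two packets: write %#x=%#x, then poll %#x mask %#x\n",
	       reg0, ref, reg1, mask);
}

/* Pick the fast path only when the firmware is known to handle it. */
static void emit_reg_write_reg_wait(bool fw_ok, unsigned reg0, unsigned reg1,
				    unsigned ref, unsigned mask)
{
	if (fw_ok)
		emit_fused_write_wait(reg0, reg1, ref, mask);
	else
		emit_generic_write_then_wait(reg0, reg1, ref, mask);
}

int main(void)
{
	emit_reg_write_reg_wait(true, 0x100, 0x104, 1, 1);
	emit_reg_write_reg_wait(false, 0x100, 0x104, 1, 1);
	return 0;
}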
@@ -5155,6 +5200,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
 	.emit_tmz = gfx_v10_0_ring_emit_tmz,
 	.emit_wreg = gfx_v10_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5188,6 +5234,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_wreg = gfx_v10_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5218,6 +5265,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
 	.emit_rreg = gfx_v10_0_ring_emit_rreg,
 	.emit_wreg = gfx_v10_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 };
 
 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -973,6 +973,13 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 	adev->gfx.me_fw_write_wait = false;
 	adev->gfx.mec_fw_write_wait = false;
 
+	if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+	    (adev->gfx.mec_feature_version < 46) ||
+	    (adev->gfx.pfp_fw_version < 0x000000b7) ||
+	    (adev->gfx.pfp_feature_version < 46))
+		DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
+			      GRBM requires 1-cycle delay in cp firmware\n");
+
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
@@ -1039,6 +1046,12 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 		    !adev->gfx.rlc.is_rlc_v2_1))
 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 
+		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_RLC_SMU_HS;
+		break;
+	case CHIP_RENOIR:
 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
 				AMD_PG_SUPPORT_CP |
@@ -344,11 +344,9 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
 			      upper_32_bits(pd_addr));
 
-	amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
-
-	/* wait for the invalidate to complete */
-	amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
-				  1 << vmid, 1 << vmid);
+	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+					    hub->vm_inv_eng0_ack + eng,
+					    req, 1 << vmid);
 
 	return pd_addr;
 }
@@ -219,6 +219,15 @@ static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
 			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
 
 	tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
+	if (adev->gmc.translate_further) {
+		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
+		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+	} else {
+		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
+		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
+				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+	}
 	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
 			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
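The BANK_SELECT and L2_CACHE_BIGK_FRAGMENT_SIZE fields programmed above are power-of-two encodings; reading a field value n as 4 KiB << n is my interpretation of the usual GPUVM fragment-size encoding, not something stated in the patch. A sketch that simply evaluates the values the hunk programs:

#include <stdio.h>

/* Assumed encoding: field value n selects a 4 KiB << n region. */
static unsigned long field_to_bytes(unsigned n)
{
	return 4096UL << n;
}

int main(void)
{
	printf("translate_further: bank=%lu KiB, bigk fragment=%lu KiB\n",
	       field_to_bytes(12) / 1024, field_to_bytes(9) / 1024);
	printf("default:           bank=%lu KiB, bigk fragment=%lu KiB\n",
	       field_to_bytes(9) / 1024, field_to_bytes(6) / 1024);
	return 0;
}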
@@ -1173,6 +1173,16 @@ static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
 }
 
+static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+						   uint32_t reg0, uint32_t reg1,
+						   uint32_t ref, uint32_t mask)
+{
+	amdgpu_ring_emit_wreg(ring, reg0, ref);
+	/* wait for a cycle to reset vm_inv_eng*_ack */
+	amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
+	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
+}
+
 static int sdma_v5_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1588,7 +1598,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
 		6 + /* sdma_v5_0_ring_emit_pipeline_sync */
 		/* sdma_v5_0_ring_emit_vm_flush */
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
 		10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
 	.emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
 	.emit_ib = sdma_v5_0_ring_emit_ib,
@@ -1602,6 +1612,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
 	.pad_ib = sdma_v5_0_ring_pad_ib,
 	.emit_wreg = sdma_v5_0_ring_emit_wreg,
 	.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
 	.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
 	.patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
 	.preempt_ib = sdma_v5_0_ring_preempt_ib,
@@ -1186,11 +1186,6 @@ static int soc15_common_early_init(void *handle)
 			AMD_PG_SUPPORT_VCN |
 			AMD_PG_SUPPORT_VCN_DPG;
 		adev->external_rev_id = adev->rev_id + 0x91;
-
-		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
-			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
-				AMD_PG_SUPPORT_CP |
-				AMD_PG_SUPPORT_RLC_SMU_HS;
 		break;
 	default:
 		/* FIXME: not supported yet */
@@ -2767,15 +2767,6 @@ void core_link_enable_stream(
 				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
 				COLOR_DEPTH_UNDEFINED);
 
-		/* This second call is needed to reconfigure the DIG
-		 * as a workaround for the incorrect value being applied
-		 * from transmitter control.
-		 */
-		if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
-			stream->link->link_enc->funcs->setup(
-				stream->link->link_enc,
-				pipe_ctx->stream->signal);
-
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (pipe_ctx->stream->timing.flags.DSC) {
 		if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
@@ -1107,6 +1107,11 @@ struct stream_encoder *dcn20_stream_encoder_create(
 	if (!enc1)
 		return NULL;
 
+	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
+		if (eng_id >= ENGINE_ID_DIGD)
+			eng_id++;
+	}
+
 	dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
 					&stream_enc_regs[eng_id],
 					&se_shift, &se_mask);
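The dcn20 hunk above handles Navi14 by bumping the requested engine id past the missing DIG D instance. A sketch of that remap; the enum values are invented, only the skip-by-increment idea comes from the patch:

#include <stdio.h>

enum eng_id { ENG_A, ENG_B, ENG_C, ENG_D, ENG_E };

/* On a part without engine D, requests for D and beyond shift up by one
 * so they land on engines that actually exist. */
static enum eng_id remap_for_chip_without_d(enum eng_id id)
{
	if (id >= ENG_D)
		id++;	/* skip the missing instance */
	return id;
}

int main(void)
{
	for (enum eng_id id = ENG_A; id < ENG_E; id++)
		printf("requested %d -> used %d\n", id,
		       remap_for_chip_without_d(id));
	return 0;
}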
@@ -205,7 +205,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_CUSTOM_BIT),
+	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
 };
@@ -219,7 +219,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
-	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_CUSTOM_BIT),
+	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
 };
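The two one-line fixes above correct a copy-paste error: the COMPUTE power profile was mapped to the CUSTOM workload bit on navi10 and vega20, so selecting "compute" programmed the wrong SMU workload. A minimal table sketch with invented bit values:

#include <stdio.h>

enum profile { PROF_VIDEO, PROF_VR, PROF_COMPUTE, PROF_CUSTOM, PROF_COUNT };
enum { BIT_VIDEO = 3, BIT_VR = 4, BIT_COMPUTE = 5, BIT_CUSTOM = 6 };

static const int workload_map[PROF_COUNT] = {
	[PROF_VIDEO]   = BIT_VIDEO,
	[PROF_VR]      = BIT_VR,
	[PROF_COMPUTE] = BIT_COMPUTE,	/* was BIT_CUSTOM before the fix */
	[PROF_CUSTOM]  = BIT_CUSTOM,
};

int main(void)
{
	printf("compute profile -> workload bit %d\n",
	       workload_map[PROF_COMPUTE]);
	return 0;
}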
@@ -1581,8 +1581,11 @@ static void commit_tail(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
 	const struct drm_mode_config_helper_funcs *funcs;
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_crtc *crtc;
 	ktime_t start;
 	s64 commit_time_ms;
+	unsigned int i, new_self_refresh_mask = 0;
 
 	funcs = dev->mode_config.helper_private;
 
@@ -1602,6 +1605,15 @@ static void commit_tail(struct drm_atomic_state *old_state)
 
 	drm_atomic_helper_wait_for_dependencies(old_state);
 
+	/*
+	 * We cannot safely access new_crtc_state after
+	 * drm_atomic_helper_commit_hw_done() so figure out which crtc's have
+	 * self-refresh active beforehand:
+	 */
+	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
+		if (new_crtc_state->self_refresh_active)
+			new_self_refresh_mask |= BIT(i);
+
 	if (funcs && funcs->atomic_commit_tail)
 		funcs->atomic_commit_tail(old_state);
 	else
|
||||
commit_time_ms = ktime_ms_delta(ktime_get(), start);
|
||||
if (commit_time_ms > 0)
|
||||
drm_self_refresh_helper_update_avg_times(old_state,
|
||||
(unsigned long)commit_time_ms);
|
||||
(unsigned long)commit_time_ms,
|
||||
new_self_refresh_mask);
|
||||
|
||||
drm_atomic_helper_commit_cleanup_done(old_state);
|
||||
|
||||
|
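The commit_tail() hunks above snapshot self_refresh_active into a plain bitmask before drm_atomic_helper_commit_hw_done(), after which the new CRTC states may no longer be dereferenced safely. A sketch of that snapshot-before-handoff pattern, with invented types:

#include <stdio.h>

#define BIT(n) (1u << (n))

struct crtc_state { int self_refresh_active; };

int main(void)
{
	struct crtc_state states[3] = { {1}, {0}, {1} };
	unsigned int mask = 0;

	/* Before the "hw_done" hand-off: snapshot what will be needed. */
	for (unsigned int i = 0; i < 3; i++)
		if (states[i].self_refresh_active)
			mask |= BIT(i);

	/* After the hand-off the states may be freed by another thread;
	 * the plain bitmask remains valid. */
	for (unsigned int i = 0; i < 3; i++)
		printf("crtc %u self-refresh: %s\n", i,
		       (mask & BIT(i)) ? "yes" : "no");
	return 0;
}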
@@ -133,29 +133,33 @@ static void drm_self_refresh_helper_entry_work(struct work_struct *work)
  * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages
  * @state: the state which has just been applied to hardware
  * @commit_time_ms: the amount of time in ms that this commit took to complete
+ * @new_self_refresh_mask: bitmask of crtc's that have self_refresh_active in
+ *    new state
  *
  * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will
  * update the average entry/exit self refresh times on self refresh transitions.
  * These averages will be used when calculating how long to delay before
  * entering self refresh mode after activity.
  */
-void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
-					      unsigned int commit_time_ms)
+void
+drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
+					 unsigned int commit_time_ms,
+					 unsigned int new_self_refresh_mask)
 {
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct drm_crtc_state *old_crtc_state;
 	int i;
 
-	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
-				      new_crtc_state, i) {
+	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+		bool new_self_refresh_active = new_self_refresh_mask & BIT(i);
 		struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
 		struct ewma_psr_time *time;
 
 		if (old_crtc_state->self_refresh_active ==
-		    new_crtc_state->self_refresh_active)
+		    new_self_refresh_active)
 			continue;
 
-		if (new_crtc_state->self_refresh_active)
+		if (new_self_refresh_active)
 			time = &sr_data->entry_avg_ms;
 		else
 			time = &sr_data->exit_avg_ms;
@@ -864,6 +864,13 @@ intel_crt_detect(struct drm_connector *connector,
 
 out:
 	intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+
+	/*
+	 * Make sure the refs for power wells enabled during detect are
+	 * dropped to avoid a new detect cycle triggered by HPD polling.
+	 */
+	intel_display_power_flush_work(dev_priv);
+
 	return status;
 }
@@ -1256,6 +1256,9 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 				u32 unused)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *i915 =
+			to_i915(intel_dig_port->base.base.dev);
+	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
 	u32 ret;
 
 	ret = DP_AUX_CH_CTL_SEND_BUSY |
|
||||
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
|
||||
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
|
||||
|
||||
if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
|
||||
if (intel_phy_is_tc(i915, phy) &&
|
||||
intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
|
||||
ret |= DP_AUX_CH_CTL_TBT_IO;
|
||||
|
||||
return ret;
|
||||
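The guard added above only sets the Thunderbolt AUX flag when the port's phy really is a Type-C phy; on other ports the tc_mode field is not meaningful. A sketch with invented types and a stand-in flag value:

#include <stdbool.h>
#include <stdio.h>

enum phy_kind { PHY_COMBO, PHY_TC };
enum tc_mode { TC_PORT_LEGACY, TC_PORT_DP_ALT, TC_PORT_TBT_ALT };

struct port {
	enum phy_kind phy;
	enum tc_mode tc_mode;	/* only meaningful when phy == PHY_TC */
};

static unsigned aux_ctl_flags(const struct port *p)
{
	unsigned flags = 0;

	/* Old code checked tc_mode alone; the fix also requires a TC phy. */
	if (p->phy == PHY_TC && p->tc_mode == TC_PORT_TBT_ALT)
		flags |= 0x800;	/* stand-in for DP_AUX_CH_CTL_TBT_IO */
	return flags;
}

int main(void)
{
	struct port combo = { PHY_COMBO, TC_PORT_TBT_ALT };	/* stale mode */
	struct port tc    = { PHY_TC,    TC_PORT_TBT_ALT };

	printf("combo port flags: %#x\n", aux_ctl_flags(&combo));
	printf("tc port flags:    %#x\n", aux_ctl_flags(&tc));
	return 0;
}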
@@ -5436,6 +5440,12 @@ intel_dp_detect(struct drm_connector *connector,
 	if (status != connector_status_connected && !intel_dp->is_mst)
 		intel_dp_unset_edid(intel_dp);
 
+	/*
+	 * Make sure the refs for power wells enabled during detect are
+	 * dropped to avoid a new detect cycle triggered by HPD polling.
+	 */
+	intel_display_power_flush_work(dev_priv);
+
 	return status;
 }
@@ -2565,6 +2565,12 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 	if (status != connector_status_connected)
 		cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
 
+	/*
+	 * Make sure the refs for power wells enabled during detect are
+	 * dropped to avoid a new detect cycle triggered by HPD polling.
+	 */
+	intel_display_power_flush_work(dev_priv);
+
 	return status;
 }
@@ -1958,6 +1958,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
 	case 0x682C:
 		si_pi->cac_weights = cac_weights_cape_verde_pro;
 		si_pi->dte_data = dte_data_sun_xt;
+		update_dte_from_pl2 = true;
 		break;
 	case 0x6825:
 	case 0x6827:
@@ -29,7 +29,7 @@ static inline void _transp(u32 d[], unsigned int i1, unsigned int i2,
 
 extern void c2p_unsupported(void);
 
-static inline u32 get_mask(unsigned int n)
+static __always_inline u32 get_mask(unsigned int n)
 {
 	switch (n) {
 	case 1:
|
||||
* Transpose operations on 8 32-bit words
|
||||
*/
|
||||
|
||||
static inline void transp8(u32 d[], unsigned int n, unsigned int m)
|
||||
static __always_inline void transp8(u32 d[], unsigned int n, unsigned int m)
|
||||
{
|
||||
u32 mask = get_mask(n);
|
||||
|
||||
@ -99,7 +99,7 @@ static inline void transp8(u32 d[], unsigned int n, unsigned int m)
|
||||
* Transpose operations on 4 32-bit words
|
||||
*/
|
||||
|
||||
static inline void transp4(u32 d[], unsigned int n, unsigned int m)
|
||||
static __always_inline void transp4(u32 d[], unsigned int n, unsigned int m)
|
||||
{
|
||||
u32 mask = get_mask(n);
|
||||
|
||||
@ -126,7 +126,7 @@ static inline void transp4(u32 d[], unsigned int n, unsigned int m)
|
||||
* Transpose operations on 4 32-bit words (reverse order)
|
||||
*/
|
||||
|
||||
static inline void transp4x(u32 d[], unsigned int n, unsigned int m)
|
||||
static __always_inline void transp4x(u32 d[], unsigned int n, unsigned int m)
|
||||
{
|
||||
u32 mask = get_mask(n);
|
||||
|
||||
|
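The c2p hunks above exist because get_mask() and the transpose helpers call c2p_unsupported(), which is declared but never defined: if the compiler inlines the helper and constant-folds the switch, the call disappears; if it declines to inline, the call survives and the final link fails. __always_inline forces the first outcome. A standalone sketch of the trick (build with optimization, e.g. cc -O2 sketch.c):

#include <stdio.h>

extern void c2p_unsupported(void);	/* intentionally never defined */

static inline __attribute__((always_inline)) unsigned get_mask(unsigned n)
{
	switch (n) {
	case 1: return 0x55555555;
	case 2: return 0x33333333;
	case 4: return 0x0f0f0f0f;
	}
	c2p_unsupported();	/* reached only for unsupported n */
	return 0;
}

int main(void)
{
	/* Constant argument: after inlining, the switch folds and no
	 * reference to the undefined function remains in the binary. */
	printf("%#x\n", get_mask(2));
	return 0;
}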
@@ -44,7 +44,20 @@ struct drm_gem_shmem_object {
 	 */
 	unsigned int pages_use_count;
 
+	/**
+	 * @madv: State for madvise
+	 *
+	 * 0 is active/inuse.
+	 * A negative value is the object is purged.
+	 * Positive values are driver specific and not used by the helpers.
+	 */
 	int madv;
+
+	/**
+	 * @madv_list: List entry for madvise tracking
+	 *
+	 * Typically used by drivers to track purgeable objects
+	 */
 	struct list_head madv_list;
 
 	/**
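The new kerneldoc above pins down the madv convention: 0 means in use, negative means purged, positive values are driver-specific. A tiny self-contained model of that convention:

#include <stdio.h>

static const char *madv_str(int madv)
{
	if (madv == 0)
		return "active/in use";
	if (madv < 0)
		return "purged";
	return "driver-specific (e.g. marked purgeable)";
}

int main(void)
{
	int samples[] = { 0, -1, 1 };

	for (int i = 0; i < 3; i++)
		printf("madv=%2d -> %s\n", samples[i], madv_str(samples[i]));
	return 0;
}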
@@ -13,7 +13,8 @@ struct drm_crtc;
 
 void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state);
 void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
-					      unsigned int commit_time_ms);
+					      unsigned int commit_time_ms,
+					      unsigned int new_self_refresh_mask);
 
 int drm_self_refresh_helper_init(struct drm_crtc *crtc);
 void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc);