mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
drm/i915/tgl: Switch between dc3co and dc5 based on display idleness
DC3CO is a useful power state when DMC detects a PSR2 idle frame during active video playback; playing a 30fps video on a 60hz panel is the classic example of this use case. B.Specs:49196 has a restriction to enable DC3CO only for video playback. Because the driver doesn't differentiate between video playback and a normal pageflip, it is worthwhile to enable DC3CO after completion of each pageflip and switch back to DC5 when the display is idle. We will use the frontbuffer flush call tgl_dc3co_flush() to enable the DC3CO state only for the ORIGIN_FLIP flush call, because the DC3CO state is primarily targeted at the VPB use case. We are not interested here in frontbuffer invalidate calls because those trigger a PSR2 exit, which will explicitly disable DC3CO. DC5 and DC6 save more power, but can't be entered during video playback because there are not enough idle frames in a row to meet most PSR2 panels' deep sleep entry requirement, typically 4 frames. As the existing PSR2 implementation uses a minimum of 6 idle frames for deep sleep, it is safer to enable DC5/6 after 6 idle frames (by scheduling a delayed work of 6 idle frames, once DC3CO has been enabled after a pageflip). After manually waiting for 6 idle frames, DC5/6 will be enabled and the PSR2 deep sleep idle frame count will be restored to 6 idle frames; at this point DMC will trigger DC5/6 once PSR2 enters deep sleep after 6 idle frames. In the future, when we enable S/W PSR2 tracking, we can change the PSR2 required deep sleep idle frames to 1 so DMC can trigger DC5/6 immediately after the S/W manual wait of 6 idle frames completes. v2: Calculated the s/w state to switch over to dc3co when there is an update. [Imre] Used cancel_delayed_work_sync() in order to avoid any race with an already scheduled delayed work. [Imre] v3: cancel_delayed_work_sync() may block the commit work, hence dropping it; dc5_idle_thread() checks for a valid wakeref before putting the reference count, which avoids any chance of dropping a zero wakeref. 
[Imre (IRC)] v4: Used the frontbuffer flush mechanism. [Imre] v5: Used psr.pipe to extract frontbuffer busy bits. [Imre] Used cancel_delayed_work_sync() in the encoder disable path. [Imre] Used mod_delayed_work() instead of cancelling and scheduling a delayed work. [Imre] Used psr.lock in tgl_dc5_idle_thread() to enable psr2 deep sleep. [Imre] Removed the DC5_REQ_IDLE_FRAMES macro. [Imre] v6: Used the dc3co_exitline check instead of the TGL and dc3co allowed_dc_mask checks, used delayed_work_pending with the psr lock and removed the psr2_deep_slp_disabled flag. [Imre] v7: Code refactoring, moved most of the functional code to intel_psr.c. [Imre] Using frontbuffer_bits on the psr.pipe check instead of busy_frontbuffer_bits. [Imre] Calculating dc3co_exit_delay in intel_psr_enable_locked. [Imre] Cc: Jani Nikula <jani.nikula@intel.com> Cc: Imre Deak <imre.deak@intel.com> Cc: Animesh Manna <animesh.manna@intel.com> Reviewed-by: Imre Deak <imre.deak@intel.com> Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com> Signed-off-by: Imre Deak <imre.deak@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20191003081738.22101-6-anshuman.gupta@intel.com
This commit is contained in:
parent
bdacf0871f
commit
1c4d821db9
@ -870,6 +870,51 @@ lookup_power_well(struct drm_i915_private *dev_priv,
|
||||
return &dev_priv->power_domains.power_wells[0];
|
||||
}
|
||||
|
||||
/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (WARN_ON(!power_well))
		goto unlock;

	/* Clamp the requested state to what the platform actually allows. */
	state = sanitize_target_dc_state(dev_priv, state);

	/* Nothing to do if we are already at the target state. */
	if (state == dev_priv->csr.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If DC off power well is disabled, need to enable and disable the
	 * DC off power well to effect target DC state.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->csr.target_dc_state = state;

	/* Re-disable to let the new target DC state take effect. */
	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
|
||||
|
||||
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
|
||||
|
@ -257,6 +257,8 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915);
|
||||
void intel_display_power_resume_early(struct drm_i915_private *i915);
|
||||
void intel_display_power_suspend(struct drm_i915_private *i915);
|
||||
void intel_display_power_resume(struct drm_i915_private *i915);
|
||||
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
|
||||
u32 state);
|
||||
|
||||
const char *
|
||||
intel_display_power_domain_str(enum intel_display_power_domain domain);
|
||||
|
@ -534,6 +534,73 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
|
||||
return trans == TRANSCODER_EDP;
|
||||
}
|
||||
|
||||
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
|
||||
{
|
||||
if (!cstate || !cstate->base.active)
|
||||
return 0;
|
||||
|
||||
return DIV_ROUND_UP(1000 * 1000,
|
||||
drm_mode_vrefresh(&cstate->base.adjusted_mode));
|
||||
}
|
||||
|
||||
static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
|
||||
u32 idle_frames)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
|
||||
val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
|
||||
val &= ~EDP_PSR2_IDLE_FRAME_MASK;
|
||||
val |= idle_frames;
|
||||
I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
|
||||
}
|
||||
|
||||
/*
 * Allow DC3CO: with 0 idle frames PSR2 signals idleness immediately,
 * then retarget the "DC off" power well at the DC3CO state.
 */
static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
{
	psr2_program_idle_frames(dev_priv, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}
|
||||
|
||||
static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int idle_frames;
|
||||
|
||||
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
|
||||
/*
|
||||
* Restore PSR2 idle frame let's use 6 as the minimum to cover all known
|
||||
* cases including the off-by-one issue that HW has in some cases.
|
||||
*/
|
||||
idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
|
||||
idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
|
||||
psr2_program_idle_frames(dev_priv, idle_frames);
|
||||
}
|
||||
|
||||
/*
 * Delayed work: once enough idle frames have elapsed without a flip
 * re-arming us, switch the target DC state back from DC3CO to DC5/6.
 */
static void tgl_dc5_idle_thread(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.idle_work.work);

	mutex_lock(&dev_priv->psr.lock);
	/*
	 * If delayed work is pending, it is not idle: a flip re-armed the
	 * work (mod_delayed_work) after this instance was already queued.
	 */
	if (delayed_work_pending(&dev_priv->psr.idle_work))
		goto unlock;

	DRM_DEBUG_KMS("DC5/6 idle thread\n");
	tgl_psr2_disable_dc3co(dev_priv);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
|
||||
|
||||
static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (!dev_priv->psr.dc3co_enabled)
|
||||
return;
|
||||
|
||||
cancel_delayed_work(&dev_priv->psr.idle_work);
|
||||
/* Before PSR2 exit disallow dc3co*/
|
||||
tgl_psr2_disable_dc3co(dev_priv);
|
||||
}
|
||||
|
||||
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
@ -746,6 +813,8 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
|
||||
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
|
||||
dev_priv->psr.busy_frontbuffer_bits = 0;
|
||||
dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
|
||||
dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
|
||||
dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
|
||||
dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
|
||||
|
||||
/*
|
||||
@ -829,6 +898,7 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
|
||||
}
|
||||
|
||||
if (dev_priv->psr.psr2_enabled) {
|
||||
tgl_disallow_dc3co_on_psr2_exit(dev_priv);
|
||||
val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
|
||||
WARN_ON(!(val & EDP_PSR2_ENABLE));
|
||||
val &= ~EDP_PSR2_ENABLE;
|
||||
@ -901,6 +971,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
|
||||
|
||||
mutex_unlock(&dev_priv->psr.lock);
|
||||
cancel_work_sync(&dev_priv->psr.work);
|
||||
cancel_delayed_work_sync(&dev_priv->psr.idle_work);
|
||||
}
|
||||
|
||||
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
|
||||
@ -1208,6 +1279,44 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
|
||||
mutex_unlock(&dev_priv->psr.lock);
|
||||
}
|
||||
|
||||
/*
 * Once we completely rely on PSR2 S/W tracking, intel_psr_flush() will
 * invalidate and flush the PSR for the ORIGIN_FLIP event as well, so
 * tgl_dc3co_flush() will need to be changed accordingly at that point.
 */
static void
tgl_dc3co_flush(struct drm_i915_private *dev_priv,
		unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	u32 delay;

	mutex_lock(&dev_priv->psr.lock);

	/* DC3CO not possible on this configuration (no exitline set up). */
	if (!dev_priv->psr.dc3co_enabled)
		goto unlock;

	if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
		goto unlock;

	/*
	 * Every flip on the PSR2 pipe re-arms the delayed work below; if the
	 * work finally runs, the display has been idle for the whole delay.
	 * Flips on other pipes are ignored here.
	 */
	if (!(frontbuffer_bits &
	    INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(dev_priv);
	/* DC5/DC6 required idle frames = 6 */
	delay = 6 * dev_priv->psr.dc3co_exit_delay;
	mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
			 usecs_to_jiffies(delay));

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
|
||||
|
||||
/**
|
||||
* intel_psr_flush - Flush PSR
|
||||
* @dev_priv: i915 device
|
||||
@ -1227,8 +1336,10 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
|
||||
if (!CAN_PSR(dev_priv))
|
||||
return;
|
||||
|
||||
if (origin == ORIGIN_FLIP)
|
||||
if (origin == ORIGIN_FLIP) {
|
||||
tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&dev_priv->psr.lock);
|
||||
if (!dev_priv->psr.enabled) {
|
||||
@ -1284,6 +1395,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
|
||||
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
|
||||
|
||||
INIT_WORK(&dev_priv->psr.work, intel_psr_work);
|
||||
INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
|
||||
mutex_init(&dev_priv->psr.lock);
|
||||
}
|
||||
|
||||
|
@ -502,6 +502,9 @@ struct i915_psr {
|
||||
bool sink_not_reliable;
|
||||
bool irq_aux_error;
|
||||
u16 su_x_granularity;
|
||||
bool dc3co_enabled;
|
||||
u32 dc3co_exit_delay;
|
||||
struct delayed_work idle_work;
|
||||
};
|
||||
|
||||
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
|
||||
|
Loading…
Reference in New Issue
Block a user