drm/i915: Flush the RPS bottom-half when the GPU idles
Make sure that the RPS bottom-half is flushed before we set the idle
frequency when we decide the GPU is idle. This should prevent any races
with the bottom-half and setting the idle frequency, and ensures that
the bottom-half is bounded by the GPU's rpm reference taken for when it
is active (i.e. between gen6_rps_busy() and gen6_rps_idle()).

v2: Avoid recursively using the i915->wq - RPS does not touch the
struct_mutex so has no place being on the ordered i915->wq.

v3: Enable/disable interrupts for RPS busy/idle in order to prevent
further HW access from RPS outside of the wakeref.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
References: https://bugs.freedesktop.org/show_bug.cgi?id=89728
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1467616119-4093-6-git-send-email-chris@chris-wilson.co.uk
commit c33d247d0e
parent df4ba5099f
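Note: the pattern the patch adopts in gen6_rps_idle() is sketched below in a
minimal, self-contained form. All demo_* names are hypothetical stand-ins, not
i915 symbols: the idle path first stops the interrupt source and flushes the
work item, so the bottom-half can neither overwrite the idle frequency written
afterwards nor run outside the busy/idle window in which the device is awake.

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Hypothetical RPS-like state, for illustration only (INIT_WORK() and
 * interrupt wiring omitted).
 */
struct demo_rps {
        struct mutex hw_lock;           /* serialises frequency writes       */
        struct work_struct work;        /* bottom-half that re-clocks the HW */
        u32 cur_freq;
        u32 idle_freq;
};

/* Hypothetical helper: mask the HW interrupt that queues demo_rps_work(). */
static void demo_mask_rps_interrupts(struct demo_rps *rps)
{
}

/* Bottom-half: runs whenever an up/down threshold interrupt fires. */
static void demo_rps_work(struct work_struct *work)
{
        struct demo_rps *rps = container_of(work, struct demo_rps, work);

        mutex_lock(&rps->hw_lock);
        /* ... evaluate thresholds and write a new frequency ... */
        mutex_unlock(&rps->hw_lock);
}

/* Idle path, mirroring the ordering gen6_rps_idle() uses after this patch. */
static void demo_rps_idle(struct demo_rps *rps)
{
        /* Stop new interrupts, then flush the worker: past this point the
         * bottom-half cannot race with the idle-frequency write below and
         * cannot run after the device has gone to sleep.
         */
        demo_mask_rps_interrupts(rps);
        cancel_work_sync(&rps->work);

        mutex_lock(&rps->hw_lock);
        rps->cur_freq = rps->idle_freq; /* stand-in for the HW write */
        mutex_unlock(&rps->hw_lock);
}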
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2737,7 +2737,6 @@ static int intel_runtime_suspend(struct device *device)
 
 	intel_guc_suspend(dev);
 
-	intel_suspend_gt_powersave(dev_priv);
 	intel_runtime_pm_disable_interrupts(dev_priv);
 
 	ret = 0;
@@ -2852,8 +2851,6 @@ static int intel_runtime_resume(struct device *device)
 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		intel_hpd_init(dev_priv);
 
-	intel_enable_gt_powersave(dev_priv);
-
 	enable_rpm_wakeref_asserts(dev_priv);
 
 	if (ret)
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -351,9 +351,8 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	spin_lock_irq(&dev_priv->irq_lock);
-
-	WARN_ON(dev_priv->rps.pm_iir);
-	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+	WARN_ON_ONCE(dev_priv->rps.pm_iir);
+	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
 	dev_priv->rps.interrupts_enabled = true;
 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
 				dev_priv->pm_rps_events);
@@ -371,11 +370,6 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->rps.interrupts_enabled = false;
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	cancel_work_sync(&dev_priv->rps.work);
-
-	spin_lock_irq(&dev_priv->irq_lock);
 
 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 
@@ -384,8 +378,15 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 				~dev_priv->pm_rps_events);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
-
 	synchronize_irq(dev_priv->dev->irq);
+
+	/* Now that we will not be generating any more work, flush any
+	 * outsanding tasks. As we are called on the RPS idle path,
+	 * we will reset the GPU to minimum frequencies, so the current
+	 * state of the worker can be discarded.
+	 */
+	cancel_work_sync(&dev_priv->rps.work);
+	gen6_reset_rps_interrupts(dev_priv);
 }
 
 /**
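Note: the ordering gen6_disable_rps_interrupts() ends up with above - mark the
source disabled under the lock, mask it, wait for in-flight handlers with
synchronize_irq(), and only then flush the work - is the usual teardown
sequence for an interrupt-driven bottom-half. A minimal sketch with
hypothetical demo_* names (not i915 code):

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical device context, for illustration only. */
struct demo_dev {
        int irq;
        spinlock_t lock;
        bool work_enabled;              /* checked by the IRQ handler */
        struct work_struct work;
};

/* Hypothetical helper: program the HW so it stops raising the interrupt. */
static void demo_mask_hw_irq(struct demo_dev *dd)
{
}

static void demo_disable_and_flush(struct demo_dev *dd)
{
        /* 1. Stop new work from being queued: clear the enable flag and
         *    mask the interrupt at the hardware.
         */
        spin_lock_irq(&dd->lock);
        dd->work_enabled = false;
        demo_mask_hw_irq(dd);
        spin_unlock_irq(&dd->lock);

        /* 2. Wait for any handler instance already running on another CPU. */
        synchronize_irq(dd->irq);

        /* 3. Nothing can re-queue the work now, so this flush is final.
         *    cancel_work_sync() may sleep, so it must sit outside the
         *    spinlock - the same reason the patch keeps it outside
         *    dev_priv->irq_lock.
         */
        cancel_work_sync(&dd->work);
}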
@@ -1082,13 +1083,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
 		return;
 	}
 
-	/*
-	 * The RPS work is synced during runtime suspend, we don't require a
-	 * wakeref. TODO: instead of disabling the asserts make sure that we
-	 * always hold an RPM reference while the work is running.
-	 */
-	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
-
 	pm_iir = dev_priv->rps.pm_iir;
 	dev_priv->rps.pm_iir = 0;
 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@@ -1101,7 +1095,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
 
 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
-		goto out;
+		return;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
@@ -1156,8 +1150,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	intel_set_rps(dev_priv, new_delay);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
-out:
-	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 }
 
 
@@ -1597,7 +1589,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
 		if (dev_priv->rps.interrupts_enabled) {
 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-			queue_work(dev_priv->wq, &dev_priv->rps.work);
+			schedule_work(&dev_priv->rps.work);
 		}
 		spin_unlock(&dev_priv->irq_lock);
 	}
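Note: the queue_work() to schedule_work() switch here (and the matching one in
gen6_rps_boost() further down) follows from the v2 note in the commit message -
the RPS worker does not take struct_mutex, so it does not belong on the
driver's ordered i915->wq. schedule_work() is simply queue_work() on the shared
system workqueue, as the small sketch below (with a hypothetical demo_kick()
caller) spells out:

#include <linux/workqueue.h>

/* schedule_work(w) is shorthand for queue_work(system_wq, w), so the patch
 * only changes which workqueue runs the RPS bottom-half, not how it is
 * queued or synchronised.
 */
static void demo_kick(struct work_struct *w)
{
        schedule_work(w);               /* what the patch now uses */
        /* queue_work(system_wq, w);       equivalent, spelled out explicitly */
}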
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4864,6 +4864,8 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 		I915_WRITE(GEN6_PMINTRMSK,
 			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
 
+		gen6_enable_rps_interrupts(dev_priv);
+
 		/* Ensure we start at the user's desired frequency */
 		intel_set_rps(dev_priv,
 			      clamp(dev_priv->rps.cur_freq,
@@ -4875,6 +4877,13 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
+	/* Flush our bottom-half so that it does not race with us
+	 * setting the idle frequency and so that it is bounded by
+	 * our rpm wakeref. And then disable the interrupts to stop any
+	 * futher RPS reclocking whilst we are asleep.
+	 */
+	gen6_disable_rps_interrupts(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -4915,7 +4924,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
 		spin_lock_irq(&dev_priv->irq_lock);
 		if (dev_priv->rps.interrupts_enabled) {
 			dev_priv->rps.client_boost = true;
-			queue_work(dev_priv->wq, &dev_priv->rps.work);
+			schedule_work(&dev_priv->rps.work);
 		}
 		spin_unlock_irq(&dev_priv->irq_lock);
 