drm/i915: Introduce parameterized DBUF_CTL

Start using a parameterized DBUF_CTL macro instead of the
hardcoded per-slice definitions; this will allow shorter access
functions when reading or storing the entire state.
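
For illustration only, a rough sketch of such an accessor (the
function name is hypothetical; it assumes the intel_de_read()/
intel_de_write() helpers and the enum dbuf_slice added by this
patch), iterating the slices through the parameterized macro
instead of open-coding S1/S2:

	/* Hypothetical sketch, not part of this patch. */
	static void sketch_dbuf_set_power_request(struct drm_i915_private *dev_priv,
						  bool enable)
	{
		enum dbuf_slice slice;

		for (slice = DBUF_S1; slice <= DBUF_S2; slice++) {
			u32 val = intel_de_read(dev_priv, DBUF_CTL_S(slice));

			if (enable)
				val |= DBUF_POWER_REQUEST;
			else
				val &= ~DBUF_POWER_REQUEST;

			intel_de_write(dev_priv, DBUF_CTL_S(slice), val);
		}
	}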

An MMIO_PIPE-style implementation was attempted; however, the
DBUF_CTL_S1 address is higher than DBUF_CTL_S2, which means the
slice offset now has to be subtracted from the base rather than
added.
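
To see that arithmetic concretely, a standalone user-space sketch
(assuming _PICK_EVEN() keeps its usual i915_reg.h form,
((__a) + (__index) * ((__b) - (__a)))):

	#include <stdio.h>

	#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))

	#define _DBUF_CTL_S1 0x45008
	#define _DBUF_CTL_S2 0x44FE8

	int main(void)
	{
		/* slice 1 steps down from the base:
		 * 0x45008 + 1 * (0x44FE8 - 0x45008) = 0x44FE8
		 */
		printf("DBUF_CTL_S(0) = 0x%x\n", _PICK_EVEN(0, _DBUF_CTL_S1, _DBUF_CTL_S2));
		printf("DBUF_CTL_S(1) = 0x%x\n", _PICK_EVEN(1, _DBUF_CTL_S1, _DBUF_CTL_S2));
		return 0;
	}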

v2: - Removed the unneeded DBUF_CTL_DIST and DBUF_CTL_ADDR
      macros and started using the _PICK construct, as suggested
      by Matt Roper.

v3: - Renamed _DBUF_CTL_S* to DBUF_CTL_S* and changed X to "slice"
      in the macro (Ville Syrjälä)
    - Introduced an enum for enumerating the DBuf slices (Ville Syrjälä)

Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200202230630.8975-5-stanislav.lisovskiy@intel.com
Stanislav Lisovskiy 2020-02-03 01:06:28 +02:00 committed by Ville Syrjälä
parent 85487cf4a1
commit 2570b7e3c5
5 changed files with 31 additions and 24 deletions


@@ -1041,7 +1041,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
 {
-	u32 tmp = intel_de_read(dev_priv, DBUF_CTL);
+	u32 tmp = intel_de_read(dev_priv, DBUF_CTL_S(0));
 
 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
@@ -4425,12 +4425,12 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
 
 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
 {
-	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
+	intel_dbuf_slice_set(dev_priv, DBUF_CTL_S(0), true);
 }
 
 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
 {
-	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
+	intel_dbuf_slice_set(dev_priv, DBUF_CTL_S(0), false);
 }
 
 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
@@ -4456,9 +4456,11 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 		return;
 
 	if (req_slices > hw_enabled_slices)
-		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+		ret = intel_dbuf_slice_set(dev_priv,
+					   DBUF_CTL_S(DBUF_S2), true);
 	else
-		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+		ret = intel_dbuf_slice_set(dev_priv,
+					   DBUF_CTL_S(DBUF_S2), false);
 
 	if (ret)
 		dev_priv->enabled_dbuf_slices_num = req_slices;
@@ -4466,16 +4468,16 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 
 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 {
-	intel_de_write(dev_priv, DBUF_CTL_S1,
-		       intel_de_read(dev_priv, DBUF_CTL_S1) | DBUF_POWER_REQUEST);
-	intel_de_write(dev_priv, DBUF_CTL_S2,
-		       intel_de_read(dev_priv, DBUF_CTL_S2) | DBUF_POWER_REQUEST);
-	intel_de_posting_read(dev_priv, DBUF_CTL_S2);
+	intel_de_write(dev_priv, DBUF_CTL_S(0),
+		       intel_de_read(dev_priv, DBUF_CTL_S(0)) | DBUF_POWER_REQUEST);
+	intel_de_write(dev_priv, DBUF_CTL_S(1),
+		       intel_de_read(dev_priv, DBUF_CTL_S(1)) | DBUF_POWER_REQUEST);
+	intel_de_posting_read(dev_priv, DBUF_CTL_S(1));
 
 	udelay(10);
 
-	if (!(intel_de_read(dev_priv, DBUF_CTL_S1) & DBUF_POWER_STATE) ||
-	    !(intel_de_read(dev_priv, DBUF_CTL_S2) & DBUF_POWER_STATE))
+	if (!(intel_de_read(dev_priv, DBUF_CTL_S(0)) & DBUF_POWER_STATE) ||
+	    !(intel_de_read(dev_priv, DBUF_CTL_S(1)) & DBUF_POWER_STATE))
 		drm_err(&dev_priv->drm, "DBuf power enable timeout\n");
 	else
 		/*
@@ -4487,16 +4489,16 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 
 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
 {
-	intel_de_write(dev_priv, DBUF_CTL_S1,
-		       intel_de_read(dev_priv, DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
-	intel_de_write(dev_priv, DBUF_CTL_S2,
-		       intel_de_read(dev_priv, DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
-	intel_de_posting_read(dev_priv, DBUF_CTL_S2);
+	intel_de_write(dev_priv, DBUF_CTL_S(0),
+		       intel_de_read(dev_priv, DBUF_CTL_S(0)) & ~DBUF_POWER_REQUEST);
+	intel_de_write(dev_priv, DBUF_CTL_S(1),
+		       intel_de_read(dev_priv, DBUF_CTL_S(1)) & ~DBUF_POWER_REQUEST);
+	intel_de_posting_read(dev_priv, DBUF_CTL_S(1));
 
 	udelay(10);
 
-	if ((intel_de_read(dev_priv, DBUF_CTL_S1) & DBUF_POWER_STATE) ||
-	    (intel_de_read(dev_priv, DBUF_CTL_S2) & DBUF_POWER_STATE))
+	if ((intel_de_read(dev_priv, DBUF_CTL_S(0)) & DBUF_POWER_STATE) ||
+	    (intel_de_read(dev_priv, DBUF_CTL_S(1)) & DBUF_POWER_STATE))
 		drm_err(&dev_priv->drm, "DBuf power disable timeout!\n");
 	else
 		/*


@@ -307,6 +307,11 @@ intel_display_power_put_async(struct drm_i915_private *i915,
 }
 #endif
 
+enum dbuf_slice {
+	DBUF_S1,
+	DBUF_S2,
+};
+
 #define with_intel_display_power(i915, domain, wf) \
 	for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
 	     intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)


@@ -2886,7 +2886,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
 	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
 
-	MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+	MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
 
 	MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);


@@ -7753,9 +7753,9 @@ enum {
 #define DISP_ARB_CTL2			_MMIO(0x45004)
 #define  DISP_DATA_PARTITION_5_6	(1 << 6)
 #define  DISP_IPC_ENABLE		(1 << 3)
-#define DBUF_CTL	_MMIO(0x45008)
-#define DBUF_CTL_S1	_MMIO(0x45008)
-#define DBUF_CTL_S2	_MMIO(0x44FE8)
+#define _DBUF_CTL_S1			0x45008
+#define _DBUF_CTL_S2			0x44FE8
+#define DBUF_CTL_S(slice)		_MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
 #define  DBUF_POWER_REQUEST		(1 << 31)
 #define  DBUF_POWER_STATE		(1 << 30)
 #define GEN7_MSG_CTL	_MMIO(0x45010)


@@ -3613,7 +3613,7 @@ u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
 	 * only that 1 slice enabled until we have a proper way for on-demand
 	 * toggling of the second slice.
 	 */
-	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
+	if (0 && I915_READ(DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE)
 		enabled_dbuf_slices_num++;
 
 	return enabled_dbuf_slices_num;