Merge tag 'gvt-fixes-2020-06-17' of https://github.com/intel/gvt-linux into drm-intel-fixes
gvt-fixes-2020-06-17

- Two missed MMIO handler fixes for SKL/CFL (Colin)
- Fix mask register bits check (Colin)
- Fix one lockdep error for debugfs entry access (Colin)

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200617043418.GQ5687@zhen-hp.sh.intel.com
commit cd65bbb4d8
@@ -66,7 +66,7 @@ static inline int mmio_diff_handler(struct intel_gvt *gvt,
 	vreg = vgpu_vreg(param->vgpu, offset);
 
 	if (preg != vreg) {
-		node = kmalloc(sizeof(*node), GFP_KERNEL);
+		node = kmalloc(sizeof(*node), GFP_ATOMIC);
 		if (!node)
 			return -ENOMEM;
 
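Note (context, not part of the diff): the "lockdep error for debugfs entry access" named in the tag is the GFP_KERNEL to GFP_ATOMIC change above; the diff handler appears to run in atomic context, where a GFP_KERNEL allocation may sleep and trips lockdep. A minimal stand-alone sketch of the same pattern, using hypothetical names rather than the actual GVT types:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical illustration only: an allocation made under a spinlock
 * (atomic context) must use GFP_ATOMIC; GFP_KERNEL may sleep there and
 * lockdep will warn about it.
 */
struct diff_node {
	struct list_head link;
	u32 offset, preg, vreg;
};

static int record_diff(spinlock_t *lock, struct list_head *list,
		       u32 offset, u32 preg, u32 vreg)
{
	struct diff_node *node;

	spin_lock(lock);
	node = kmalloc(sizeof(*node), GFP_ATOMIC);	/* non-sleeping allocation */
	if (!node) {
		spin_unlock(lock);
		return -ENOMEM;
	}
	node->offset = offset;
	node->preg = preg;
	node->vreg = vreg;
	list_add(&node->link, list);
	spin_unlock(lock);
	return 0;
}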
@@ -1726,13 +1726,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
 	write_vreg(vgpu, offset, p_data, bytes);
 
-	if (data & _MASKED_BIT_ENABLE(1)) {
+	if (IS_MASKED_BITS_ENABLED(data, 1)) {
 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
 		return 0;
 	}
 
 	if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
-	    data & _MASKED_BIT_ENABLE(2)) {
+	    IS_MASKED_BITS_ENABLED(data, 2)) {
 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
 		return 0;
 	}
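Note on the check change (explanation, not part of the diff): these are "masked" registers, where the upper 16 bits select which of the lower 16 bits a write actually touches. In i915, _MASKED_BIT_ENABLE(b) expands to both halves set ((b) << 16 | (b)) and _MASKED_BIT_DISABLE(b) to the mask half only ((b) << 16). The old test `data & _MASKED_BIT_ENABLE(b)` is nonzero even when the guest writes a disable (mask bit set, value bit clear); the new IS_MASKED_BITS_ENABLED() requires both halves. A small user-space sketch of the difference, using simplified macro definitions that mirror the i915 ones:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the i915 macros (illustration only). */
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)	((a) << 16)
#define IS_MASKED_BITS_ENABLED(_val, _b) \
	(((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))

int main(void)
{
	/* Guest write that disables bit 0: mask half set, value half clear. */
	uint32_t data = _MASKED_BIT_DISABLE(0x1);	/* 0x00010000 */

	printf("old check: %d\n", !!(data & _MASKED_BIT_ENABLE(0x1)));	/* 1: false positive */
	printf("new check: %d\n", !!IS_MASKED_BITS_ENABLED(data, 0x1));	/* 0: correct */
	return 0;
}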
@@ -1741,14 +1741,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	 * pvinfo, if not, we will treat this guest as non-gvtg-aware
 	 * guest, and stop emulating its cfg space, mmio, gtt, etc.
 	 */
-	if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
-			(data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
-			&& !vgpu->pv_notified) {
+	if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
+	     IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
+	    !vgpu->pv_notified) {
 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
 		return 0;
 	}
-	if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
-			|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+	if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
+	    IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
 		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
 
 		gvt_dbg_core("EXECLIST %s on ring %s\n",
@@ -1809,7 +1809,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 	write_vreg(vgpu, offset, p_data, bytes);
 	data = vgpu_vreg(vgpu, offset);
 
-	if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+	if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
 		data |= RESET_CTL_READY_TO_RESET;
 	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
 		data &= ~RESET_CTL_READY_TO_RESET;
@@ -1827,7 +1827,8 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
 		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
 	write_vreg(vgpu, offset, p_data, bytes);
 
-	if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+	if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
+	    IS_MASKED_BITS_ENABLED(data, 0x8))
 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
 
 	return 0;
@@ -3055,6 +3056,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
 
 	MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
 	MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
@@ -3131,8 +3133,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 		 NULL, NULL);
 
-	MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
-	MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
+	MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+	MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
 
 	return 0;
 }
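Background (not part of the diff): the two init_skl_mmio_info() hunks above appear to be the "two missed MMIO handler fixes for SKL/CFL" from the tag: _PLANE_SURF_3_B gains an entry, and GAMT_CHKN_BIT_REG / GEN9_CTX_PREEMPT_REG gain Coffee Lake coverage, so guest accesses to those offsets hit a registered entry instead of the untracked-register fallback. A purely illustrative sketch of that lookup-with-fallback pattern, with hypothetical names (not the GVT implementation):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical MMIO emulation table: a register missing from the table
 * falls through to a default path, which is what the added MMIO_D()
 * entries avoid for the offsets above.
 */
struct mmio_entry {
	uint32_t offset;
	uint32_t (*read)(uint32_t offset);
};

static uint32_t untracked_read(uint32_t offset)
{
	/* Untracked register: log it and return a harmless default. */
	return 0;
}

static uint32_t emulate_mmio_read(const struct mmio_entry *table, size_t n,
				  uint32_t offset)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (table[i].offset == offset)
			return table[i].read(offset);
	return untracked_read(offset);
}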
@@ -54,8 +54,8 @@ bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 				       struct i915_request *req);
-#define IS_RESTORE_INHIBIT(a) \
-	(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
-	((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
+
+#define IS_RESTORE_INHIBIT(a) \
+	IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)
 
 #endif
@@ -94,6 +94,11 @@
 #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
 		((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
 
+#define IS_MASKED_BITS_ENABLED(_val, _b) \
+		(((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+#define IS_MASKED_BITS_DISABLED(_val, _b) \
+		((_val) & _MASKED_BIT_DISABLE(_b))
+
 #define FORCEWAKE_RENDER_GEN9_REG 0xa278
 #define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
 #define FORCEWAKE_BLITTER_GEN9_REG 0xa188
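Worked values for the two new helpers (illustrative note, assuming the usual i915 expansions _MASKED_BIT_ENABLE(b) == ((b) << 16 | (b)) and _MASKED_BIT_DISABLE(b) == ((b) << 16)):

/*
 * For a single-bit mask 0x1:
 *   _MASKED_BIT_ENABLE(0x1)  == 0x00010001   (mask bit + value bit)
 *   _MASKED_BIT_DISABLE(0x1) == 0x00010000   (mask bit only)
 *
 *   guest write data       IS_MASKED_BITS_ENABLED   IS_MASKED_BITS_DISABLED
 *   0x00010001 (enable)            true                     true
 *   0x00010000 (disable)           false                    true
 *   0x00000001 (no mask)           false                    false
 *
 * ENABLED requires both halves; DISABLED only checks the mask half, so
 * callers test the enable case first (as ring_reset_ctl_write() does above).
 */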