Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-18 10:36:45 +07:00)
drm/i915/gvt: change resetting to resetting_eng
Use resetting_eng to identify which engine is being reset, so that the
other engines' workloads are not impacted.

v2:
- use ENGINE_MASK(ring_id) instead of (1 << ring_id). (Zhenyu)

Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 26a201a2ba
commit 6184cc8ddb
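To make the intent of the change concrete before the diff, here is a minimal userspace sketch (not GVT-g code; the demo_vgpu struct, workload_blocked() helper, and ring numbers are made up for illustration, and ENGINE_MASK() is modeled as a plain bit shift) showing why a per-engine bitmask only blocks workloads on the engine that is actually resetting, whereas the old single bool flag blocked all of them:

/* build: cc -o resetting_eng_demo resetting_eng_demo.c */
#include <stdbool.h>
#include <stdio.h>

#define ENGINE_MASK(ring_id)	(1u << (ring_id))	/* modeled here, not the kernel macro */
#define ALL_ENGINES		(~0u)			/* "every engine is resetting" */

struct demo_vgpu {
	unsigned int resetting_eng;	/* bit N set => engine N is under reset */
};

/* A workload on ring_id is skipped only if its own engine is resetting. */
static bool workload_blocked(const struct demo_vgpu *vgpu, int ring_id)
{
	return vgpu->resetting_eng & ENGINE_MASK(ring_id);
}

int main(void)
{
	struct demo_vgpu vgpu = { .resetting_eng = ENGINE_MASK(2) };

	printf("ring 2 blocked: %d\n", workload_blocked(&vgpu, 2));	/* 1 */
	printf("ring 0 blocked: %d\n", workload_blocked(&vgpu, 0));	/* 0 */

	vgpu.resetting_eng = 0;	/* reset finished, mask cleared */
	printf("ring 2 blocked: %d\n", workload_blocked(&vgpu, 2));	/* 0 */
	return 0;
}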
@@ -499,10 +499,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_execlist *execlist =
-		&vgpu->execlist[workload->ring_id];
+	int ring_id = workload->ring_id;
+	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
 	int ret;
 
@@ -512,10 +512,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
-	if (workload->status || vgpu->resetting)
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
 		goto out;
 
-	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
@@ -149,7 +149,7 @@ struct intel_vgpu {
 	bool active;
 	bool pv_notified;
 	bool failsafe;
-	bool resetting;
+	unsigned int resetting_eng;
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;
 
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	i915_gem_request_put(fetch_and_zero(&workload->req));
 
-	if (!workload->status && !vgpu->resetting) {
+	if (!workload->status && !(vgpu->resetting_eng &
+				   ENGINE_MASK(ring_id))) {
 		update_guest_context(workload);
 
 		for_each_set_bit(event, workload->pending_events,
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
 		     vgpu->id, dmlr, engine_mask);
-	vgpu->resetting = true;
+
+	vgpu->resetting_eng = resetting_eng;
 
 	intel_vgpu_stop_schedule(vgpu);
 	/*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		mutex_lock(&gvt->lock);
 	}
 
-	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+	intel_vgpu_reset_execlist(vgpu, resetting_eng);
 
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		}
 	}
 
-	vgpu->resetting = false;
+	vgpu->resetting_eng = 0;
 	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
 	gvt_dbg_core("------------------------------------------\n");
 }