drm/i915/gvt: refine intel_vgpu_submission_ops as per engine ops

Using per-engine ops is more flexible: refine the sub-ops (init,
clean) to take an engine mask so they operate per engine, aligning them
with the reset operation. This change will also be used by an upcoming
fix patch for VM engine reset.

Cc: Fred Gao <fred.gao@intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Weinan Li <weinan.z.li@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Author:    Weinan Li <weinan.z.li@intel.com>, 2018-01-26 15:09:07 +08:00
Committer: Rodrigo Vivi <rodrigo.vivi@intel.com>
Commit:    7569a06dc8 (parent b8a89f530f)
Stats:     6 changed files, 20 insertions(+), 18 deletions(-)

--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c

@@ -521,24 +521,23 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
 			_EL_OFFSET_STATUS_PTR);
-
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 	ctx_status_ptr.read_ptr = 0;
 	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
 {
-	enum intel_engine_id i;
+	unsigned int tmp;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		struct intel_vgpu_submission *s = &vgpu->submission;
-
-		kfree(s->ring_scan_buffer[i]);
-		s->ring_scan_buffer[i] = NULL;
-		s->ring_scan_buffer_size[i] = 0;
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		kfree(s->ring_scan_buffer[engine->id]);
+		s->ring_scan_buffer[engine->id] = NULL;
+		s->ring_scan_buffer_size[engine->id] = 0;
 	}
 }
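A quick reference for the iterator swap above, since it carries the semantic
change: for_each_engine() visits every engine, while for_each_engine_masked()
visits only the engines whose bits are set in the mask. A minimal sketch,
assuming the i915 iterator macros of this era; the mask value and the debug
prints are illustrative, not from this patch:

	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int tmp;		/* scratch required by the masked iterator */
	unsigned long mask = BIT(RCS);	/* example: render engine only */

	for_each_engine(engine, dev_priv, id)			/* all engines */
		gvt_dbg_core("engine %d\n", engine->id);

	for_each_engine_masked(engine, dev_priv, mask, tmp)	/* masked subset */
		gvt_dbg_core("engine %d\n", engine->id);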
@@ -553,9 +552,10 @@ static void reset_execlist(struct intel_vgpu *vgpu,
 		init_vgpu_execlist(vgpu, engine->id);
 }
 
-static int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu,
+			 unsigned long engine_mask)
 {
-	reset_execlist(vgpu, ALL_ENGINES);
+	reset_execlist(vgpu, engine_mask);
 
 	return 0;
 }

--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h

@@ -152,8 +152,8 @@ enum {
 
 struct intel_vgpu_submission_ops {
 	const char *name;
-	int (*init)(struct intel_vgpu *vgpu);
-	void (*clean)(struct intel_vgpu *vgpu);
+	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 };
 
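After this change every hook in the ops table shares the (vgpu, engine_mask)
scope. As a sketch of how the execlist backend wires in (the field wiring
follows the functions touched above; the table name matches the one exported
from execlist.c, and the comments are annotations, not source):

	const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = {
		.name  = "execlist",
		.init  = init_execlist,		/* int  (*)(vgpu, engine_mask) */
		.clean = clean_execlist,	/* void (*)(vgpu, engine_mask) */
		.reset = reset_execlist,	/* void (*)(vgpu, engine_mask) */
	};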

--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c

@@ -1527,6 +1527,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		return 0;
 
 	ret = intel_vgpu_select_submission_ops(vgpu,
+					       ALL_ENGINES,
 					       INTEL_VGPU_EXECLIST_SUBMISSION);
 	if (ret)
 		return ret;

--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c

@@ -991,7 +991,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 
-	intel_vgpu_select_submission_ops(vgpu, 0);
+	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 	i915_gem_context_put(s->shadow_ctx);
 	kmem_cache_destroy(s->workloads);
 }
@@ -1079,6 +1079,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
  *
  */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1092,7 +1093,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 		return -EINVAL;
 
 	if (s->active) {
-		s->ops->clean(vgpu);
+		s->ops->clean(vgpu, engine_mask);
 		s->active = false;
 		gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
 				vgpu->id, s->ops->name);
@@ -1105,7 +1106,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 		return 0;
 	}
 
-	ret = ops[interface]->init(vgpu);
+	ret = ops[interface]->init(vgpu, engine_mask);
 	if (ret)
 		return ret;
 
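Both call sites converted in this patch pass ALL_ENGINES, so behavior is
unchanged for now; the mask only becomes meaningful once a caller narrows it.
A minimal usage sketch of the widened entry point (this mirrors the ring-mode
MMIO handler hunk above; the error handling shown is the caller's own):

	/* Switch a vGPU to execlist submission, scoped to every engine. */
	ret = intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES,
					       INTEL_VGPU_EXECLIST_SUBMISSION);
	if (ret)
		return ret;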

--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h

@@ -141,6 +141,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface);
 
 extern const struct intel_vgpu_submission_ops

--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c

@@ -520,8 +520,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	intel_vgpu_reset_submission(vgpu, resetting_eng);
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
-
-		intel_vgpu_select_submission_ops(vgpu, 0);
+		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
 			intel_vgpu_reset_gtt(vgpu);
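The commit message points at a follow-up fix for VM engine reset. Purely as an
illustration of what the per-engine hooks enable (a hypothetical sketch, not
the follow-up patch itself), a single-engine reset path could now rebuild
submission state for just the engines being reset, with s being
&vgpu->submission as elsewhere in this file:

	/* Hypothetical: scope teardown/re-init to resetting_eng instead of
	 * de-selecting the ops for the whole vGPU. */
	s->ops->clean(vgpu, resetting_eng);
	s->ops->init(vgpu, resetting_eng);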