drm/i915/gvt: Support PPGTT table load command
The PPGTT in the context image can be overridden by an LRI command that
carries another PPGTT's pdps. In that case the PPGTT loaded by the command
is used instead of the one in the context image, so GVT needs to load its
shadow mm and replace the ppgtt pointers in the command. This feature is
used by the guest IGD driver to share a gfx VM between different contexts.
Verified by the IGT "gem_ctx_clone" test.

v4:
- consolidate shadow mm handlers (Yan)
- fix cmd shadow mm pin error path

v3: (Zhenyu Wang)
- Cleanup PDP register offset check
- Add debug check for guest context ppgtt update
- Skip 3-level ppgtt guest handling code. All guests now use 4-level
  ppgtt tables, and the only remaining 3-level case is the ancient
  aliasing ppgtt, whose guest kernels never issue the PPGTT LRI
  command, so a 3-level guest for this feature is simply un-testable.

v2: (Zhenyu Wang)
- Change to a list to handle possible multiple ppgtt table loads in one
  submission, and make sure the shadow mm is replaced for each one.

Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Cc: Yan Zhao <yan.y.zhao@intel.com>
Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200508031409.2562-1-zhenyuw@linux.intel.com
commit bec3df930f
parent 40dcee1b7c
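For orientation before the diff: the new cmd_parser handler assumes a fixed MI_LOAD_REGISTER_IMM layout in the guest command stream, with dwords 1/3 holding the PDP0 UDW/LDW register offsets and dwords 2/4 holding the immediate values, which is why it reads and patches cmd_val(s, 2) and cmd_val(s, 4). A minimal illustrative sketch (not part of the patch) of such a command, assuming the render engine's mmio_base of 0x2000 so PDP0 LDW/UDW sit at 0x2270/0x2274, and using a made-up PML4 address:

#include <linux/types.h>

/*
 * Illustrative dword stream only, not taken from the patch: an LRI that
 * rewrites PDP0 of a 4-level PPGTT. The index comments match the
 * cmd_val() indices used by cmd_pdp_mmio_update_handler() below.
 */
static const u32 example_pdp_lri[] = {
	0x11000003,	/* cmd_val(s, 0): MI_LOAD_REGISTER_IMM(2) header */
	0x2274,		/* cmd_val(s, 1): GEN8_RING_PDP_UDW(base, 0) offset */
	0x00000001,	/* cmd_val(s, 2): upper 32 bits of the PML4 address */
	0x2270,		/* cmd_val(s, 3): GEN8_RING_PDP_LDW(base, 0) offset */
	0x23450000,	/* cmd_val(s, 4): lower 32 bits of the PML4 address */
};

The handler only patches the two immediate dwords with the shadow PML4 address; the register offsets themselves are left untouched.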
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -881,6 +881,47 @@ static int mocs_cmd_reg_handler(struct parser_exec_state *s,
 	return 0;
 }
 
+static int is_cmd_update_pdps(unsigned int offset,
+			      struct parser_exec_state *s)
+{
+	u32 base = s->workload->engine->mmio_base;
+	return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
+}
+
+static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
+		unsigned int offset, unsigned int index)
+{
+	struct intel_vgpu *vgpu = s->vgpu;
+	struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
+	struct intel_vgpu_mm *mm;
+	u64 pdps[GEN8_3LVL_PDPES];
+
+	if (shadow_mm->ppgtt_mm.root_entry_type ==
+	    GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+		pdps[0] = (u64)cmd_val(s, 2) << 32;
+		pdps[0] |= cmd_val(s, 4);
+
+		mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
+		if (!mm) {
+			gvt_vgpu_err("failed to get the 4-level shadow vm\n");
+			return -EINVAL;
+		}
+		intel_vgpu_mm_get(mm);
+		list_add_tail(&mm->ppgtt_mm.link,
+			      &s->workload->lri_shadow_mm);
+		*cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+		*cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+	} else {
+		/* Currently all guests use PML4 table and now can't
+		 * have a guest with 3-level table but uses LRI for
+		 * PPGTT update. So this is simply un-testable. */
+		GEM_BUG_ON(1);
+		gvt_vgpu_err("invalid shared shadow vm type\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
 	unsigned int offset, unsigned int index, char *cmd)
 {
@@ -919,6 +960,10 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
 	}
 
+	if (is_cmd_update_pdps(offset, s) &&
+	    cmd_pdp_mmio_update_handler(s, offset, index))
+		return -EINVAL;
+
 	/* TODO
 	 * In order to let workload with inhibit context to generate
 	 * correct image data into memory, vregs values will be loaded to
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1900,6 +1900,7 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
 
 	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
 	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
+	INIT_LIST_HEAD(&mm->ppgtt_mm.link);
 
 	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
 		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -160,6 +160,7 @@ struct intel_vgpu_mm {
 
 			struct list_head list;
 			struct list_head lru_list;
+			struct list_head link; /* possible LRI shadow mm list */
 		} ppgtt_mm;
 		struct {
 			void *virtual_ggtt;
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2808,7 +2808,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GAMTARBMODE, D_BDW_PLUS);
 
 #define RING_REG(base) _MMIO((base) + 0x270)
-	MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
 	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -647,10 +647,11 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	}
 }
 
-static int prepare_workload(struct intel_vgpu_workload *workload)
+static int
+intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_mm *m;
 	int ret = 0;
 
 	ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -665,6 +666,52 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		return -EINVAL;
 	}
 
+	if (!list_empty(&workload->lri_shadow_mm)) {
+		list_for_each_entry(m, &workload->lri_shadow_mm,
+				    ppgtt_mm.link) {
+			ret = intel_vgpu_pin_mm(m);
+			if (ret) {
+				list_for_each_entry_from_reverse(m,
+						&workload->lri_shadow_mm,
+						ppgtt_mm.link)
+					intel_vgpu_unpin_mm(m);
+				gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
+				break;
+			}
+		}
+	}
+
+	if (ret)
+		intel_vgpu_unpin_mm(workload->shadow_mm);
+
+	return ret;
+}
+
+static void
+intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu_mm *m;
+
+	if (!list_empty(&workload->lri_shadow_mm)) {
+		list_for_each_entry(m, &workload->lri_shadow_mm,
+				    ppgtt_mm.link)
+			intel_vgpu_unpin_mm(m);
+	}
+	intel_vgpu_unpin_mm(workload->shadow_mm);
+}
+
+static int prepare_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	int ret = 0;
+
+	ret = intel_vgpu_shadow_mm_pin(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to pin shadow mm\n");
+		return ret;
+	}
+
 	update_shadow_pdps(workload);
 
 	set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
@@ -711,7 +758,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 err_shadow_batch:
 	release_shadow_batch_buffer(workload);
 err_unpin_mm:
-	intel_vgpu_unpin_mm(workload->shadow_mm);
+	intel_vgpu_shadow_mm_unpin(workload);
 	return ret;
 }
 
@@ -821,6 +868,37 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
 	return workload;
 }
 
+static void update_guest_pdps(struct intel_vgpu *vgpu,
+			      u64 ring_context_gpa, u32 pdp[8])
+{
+	u64 gpa;
+	int i;
+
+	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
+
+	for (i = 0; i < 8; i++)
+		intel_gvt_hypervisor_write_gpa(vgpu,
+				gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static bool
+check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
+{
+	if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+		u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;
+
+		if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
+			gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
+			return false;
+		}
+		return true;
+	} else {
+		/* see comment in LRI handler in cmd_parser.c */
+		gvt_dbg_mm("invalid shadow mm type\n");
+		return false;
+	}
+}
+
 static void update_guest_context(struct intel_vgpu_workload *workload)
 {
 	struct i915_request *rq = workload->req;
@@ -906,6 +984,15 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 
 	shadow_ring_context = (void *) ctx->lrc_reg_state;
 
+	if (!list_empty(&workload->lri_shadow_mm)) {
+		struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
+							   struct intel_vgpu_mm,
+							   ppgtt_mm.link);
+		GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
+		update_guest_pdps(vgpu, workload->ring_context_gpa,
+				  (void *)m->ppgtt_mm.guest_pdps);
+	}
+
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
 		RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
@@ -1014,7 +1101,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	workload->complete(workload);
 
-	intel_vgpu_unpin_mm(workload->shadow_mm);
+	intel_vgpu_shadow_mm_unpin(workload);
 	intel_vgpu_destroy_workload(workload);
 
 	atomic_dec(&s->running_workload_num);
@@ -1409,6 +1496,16 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
+	if (!list_empty(&workload->lri_shadow_mm)) {
+		struct intel_vgpu_mm *m, *mm;
+		list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
+					 ppgtt_mm.link) {
+			list_del(&m->ppgtt_mm.link);
+			intel_vgpu_mm_put(m);
+		}
+	}
+
+	GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
 	if (workload->shadow_mm)
 		intel_vgpu_mm_put(workload->shadow_mm);
 
@@ -1427,6 +1524,7 @@ alloc_workload(struct intel_vgpu *vgpu)
 
 	INIT_LIST_HEAD(&workload->list);
 	INIT_LIST_HEAD(&workload->shadow_bb);
+	INIT_LIST_HEAD(&workload->lri_shadow_mm);
 
 	init_waitqueue_head(&workload->shadow_ctx_status_wq);
 	atomic_set(&workload->shadow_ctx_active, 0);
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -87,6 +87,7 @@ struct intel_vgpu_workload {
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;
+	struct list_head lri_shadow_mm; /* For PPGTT load cmd */
 
 	/* different submission model may need different handler */
 	int (*prepare)(struct intel_vgpu_workload *);