Merge tag 'gvt-next-2016-11-07' of https://github.com/01org/gvt-linux into drm-intel-next-queued
gvt-next-2016-11-07
- Fix regression from e95433c73a
- Some MMIO handler fixes
- Add better handling for guest reset control
- Scratch page table tree for shadow ppgtt
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
commit d7931c1879
@@ -1418,8 +1418,8 @@ static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
 static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
 {
 	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
-	int op_size = ((1 << (cmd_val(s, 0) & GENMASK(20, 19) >> 19)) *
-			sizeof(u32));
+	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
+			sizeof(u32);
 	unsigned long gma, gma_high;
 	int ret = 0;
 
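A note on the op_size change above: in C, `>>` binds tighter than `&`, so the old expression masked cmd_val(s, 0) with GENMASK(20, 19) >> 19 (which is just 0x3) instead of extracting bits 20:19. A small standalone sketch of the difference, using a hypothetical command value and a local GENMASK definition (the kernel's macro differs slightly):

/* precedence sketch: extracting bits 20:19 of a command dword */
#include <stdio.h>
#define GENMASK(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

int main(void)
{
	unsigned int cmd = 0x00180000;	/* hypothetical value, bits 20:19 = 0b11 */

	/* old form: '>>' applies first, so this is cmd & 0x3 */
	unsigned int old = 1 << (cmd & GENMASK(20, 19) >> 19);
	/* fixed form: mask first, then shift the field down */
	unsigned int new = 1 << ((cmd & GENMASK(20, 19)) >> 19);

	printf("old=%u new=%u\n", old, new);	/* prints old=1 new=8 */
	return 0;
}

With bits 20:19 extracted correctly, op_size becomes (1 << field) * sizeof(u32), i.e. 4 to 32 bytes.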
@@ -138,36 +138,6 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
 	memcpy(&(e)->val64, &v, sizeof(v)); \
 } while (0)
 
-enum {
-	GTT_TYPE_INVALID = -1,
-
-	GTT_TYPE_GGTT_PTE,
-
-	GTT_TYPE_PPGTT_PTE_4K_ENTRY,
-	GTT_TYPE_PPGTT_PTE_2M_ENTRY,
-	GTT_TYPE_PPGTT_PTE_1G_ENTRY,
-
-	GTT_TYPE_PPGTT_PTE_ENTRY,
-
-	GTT_TYPE_PPGTT_PDE_ENTRY,
-	GTT_TYPE_PPGTT_PDP_ENTRY,
-	GTT_TYPE_PPGTT_PML4_ENTRY,
-
-	GTT_TYPE_PPGTT_ROOT_ENTRY,
-
-	GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
-	GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
-
-	GTT_TYPE_PPGTT_ENTRY,
-
-	GTT_TYPE_PPGTT_PTE_PT,
-	GTT_TYPE_PPGTT_PDE_PT,
-	GTT_TYPE_PPGTT_PDP_PT,
-	GTT_TYPE_PPGTT_PML4_PT,
-
-	GTT_TYPE_MAX,
-};
-
 /*
  * Mappings between GTT_TYPE* enumerations.
  * Following information can be found according to the given type:
@@ -842,13 +812,18 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
+	intel_gvt_gtt_type_t cur_pt_type;
 
 	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
 		return -EINVAL;
 
-	if (ops->get_pfn(e) == vgpu->gtt.scratch_page_mfn)
-		return 0;
+	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+		cur_pt_type = get_next_pt_type(e->type) + 1;
+		if (ops->get_pfn(e) ==
+			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
+			return 0;
+	}
 	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
 	if (!s) {
 		gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
@@ -1015,7 +990,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
 	if (!ops->test_present(&e))
 		return 0;
 
-	if (ops->get_pfn(&e) == vgpu->gtt.scratch_page_mfn)
+	if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
 		return 0;
 
 	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
@@ -1030,7 +1005,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
 		if (ret)
 			goto fail;
 	}
-	ops->set_pfn(&e, vgpu->gtt.scratch_page_mfn);
+	ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
 	ppgtt_set_shadow_entry(spt, &e, index);
 	return 0;
 fail:
@@ -1921,47 +1896,101 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	return ret;
 }
 
-static int create_scratch_page(struct intel_vgpu *vgpu)
+static int alloc_scratch_pages(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t type)
 {
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
-	void *p;
-	void *vaddr;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	int page_entry_num = GTT_PAGE_SIZE >>
+				vgpu->gvt->device_info.gtt_entry_size_shift;
+	struct page *scratch_pt;
 	unsigned long mfn;
+	int i;
+	void *p;
 
-	gtt->scratch_page = alloc_page(GFP_KERNEL);
-	if (!gtt->scratch_page) {
-		gvt_err("Failed to allocate scratch page.\n");
+	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+		return -EINVAL;
+
+	scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+	if (!scratch_pt) {
+		gvt_err("fail to allocate scratch page\n");
 		return -ENOMEM;
 	}
 
-	/* set to zero */
-	p = kmap_atomic(gtt->scratch_page);
-	memset(p, 0, PAGE_SIZE);
-	kunmap_atomic(p);
-
-	/* translate page to mfn */
-	vaddr = page_address(gtt->scratch_page);
-	mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
-
+	p = kmap_atomic(scratch_pt);
+	mfn = intel_gvt_hypervisor_virt_to_mfn(p);
 	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate vaddr: 0x%p\n", vaddr);
-		__free_page(gtt->scratch_page);
-		gtt->scratch_page = NULL;
-		return -ENXIO;
+		gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
+		kunmap_atomic(p);
+		__free_page(scratch_pt);
+		return -EFAULT;
+	}
+	gtt->scratch_pt[type].page_mfn = mfn;
+	gtt->scratch_pt[type].page = scratch_pt;
+	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
+			vgpu->id, type, mfn);
+
+	/* Build the tree by full filled the scratch pt with the entries which
+	 * point to the next level scratch pt or scratch page. The
+	 * scratch_pt[type] indicate the scratch pt/scratch page used by the
+	 * 'type' pt.
+	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
+	 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+	 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
+	 */
+	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+		struct intel_gvt_gtt_entry se;
+
+		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
+		se.type = get_entry_type(type - 1);
+		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
+
+		/* The entry parameters like present/writeable/cache type
+		 * set to the same as i915's scratch page tree.
+		 */
+		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+		if (type == GTT_TYPE_PPGTT_PDE_PT)
+			se.val64 |= PPAT_CACHED_INDEX;
+
+		for (i = 0; i < page_entry_num; i++)
+			ops->set_entry(p, &se, i, false, 0, vgpu);
 	}
 
-	gtt->scratch_page_mfn = mfn;
-	gvt_dbg_core("vgpu%d create scratch page: mfn=0x%lx\n", vgpu->id, mfn);
+	kunmap_atomic(p);
+
 	return 0;
 }
 
-static void release_scratch_page(struct intel_vgpu *vgpu)
+static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 {
-	if (vgpu->gtt.scratch_page != NULL) {
-		__free_page(vgpu->gtt.scratch_page);
-		vgpu->gtt.scratch_page = NULL;
-		vgpu->gtt.scratch_page_mfn = 0;
+	int i;
+
+	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+		if (vgpu->gtt.scratch_pt[i].page != NULL) {
+			__free_page(vgpu->gtt.scratch_pt[i].page);
+			vgpu->gtt.scratch_pt[i].page = NULL;
+			vgpu->gtt.scratch_pt[i].page_mfn = 0;
+		}
 	}
+
+	return 0;
+}
+
+static int create_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+	int i, ret;
+
+	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+		ret = alloc_scratch_pages(vgpu, i);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	release_scratch_page_tree(vgpu);
+	return ret;
 }
 
 /**
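The create_scratch_page_tree()/alloc_scratch_pages() pair above replaces the single scratch page with one scratch page table per PPGTT level and chains them: each level above the PTE table is pre-filled so every entry points at the scratch table one level below, while the lowest-level scratch table stays zeroed. A shadow page-table walk through unmapped guest space therefore always resolves to a scratch table. A rough standalone sketch of that chaining idea (simplified, hypothetical types rather than the GVT pte_ops interface):

/* scratch-table chaining sketch; not the GVT code, just the shape of it */
#include <stdio.h>

enum level { PTE_PT, PDE_PT, PDP_PT, PML4_PT, LEVEL_MAX };
#define ENTRIES_PER_PT 4	/* 512 on real hardware, shrunk for the sketch */

static unsigned long scratch_mfn[LEVEL_MAX];			/* one scratch page per level */
static unsigned long scratch_entries[LEVEL_MAX][ENTRIES_PER_PT];

int main(void)
{
	int level, i;

	for (level = PTE_PT; level < LEVEL_MAX; level++) {
		scratch_mfn[level] = 0x1000 + level;	/* pretend page frame numbers */
		if (level == PTE_PT)
			continue;			/* lowest-level table stays zero-filled */
		/* every slot of this level's scratch table points one level down */
		for (i = 0; i < ENTRIES_PER_PT; i++)
			scratch_entries[level][i] = scratch_mfn[level - 1];
	}

	printf("PML4 scratch entry -> 0x%lx (PDP scratch table)\n",
		scratch_entries[PML4_PT][0]);
	return 0;
}

In the real patch the pre-filled entries also carry _PAGE_PRESENT | _PAGE_RW (and PPAT_CACHED_INDEX at the PDE level) so they match the flags used in i915's own scratch page tree.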
@@ -1995,7 +2024,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 
 	gtt->ggtt_mm = ggtt_mm;
 
-	return create_scratch_page(vgpu);
+	return create_scratch_page_tree(vgpu);
 }
 
 /**
@@ -2014,7 +2043,7 @@ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
 	struct intel_vgpu_mm *mm;
 
 	ppgtt_free_all_shadow_page(vgpu);
-	release_scratch_page(vgpu);
+	release_scratch_page_tree(vgpu);
 
 	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, list);
@@ -88,6 +88,36 @@ enum {
 	INTEL_GVT_MM_PPGTT,
 };
 
+typedef enum {
+	GTT_TYPE_INVALID = -1,
+
+	GTT_TYPE_GGTT_PTE,
+
+	GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+	GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+	GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+
+	GTT_TYPE_PPGTT_PTE_ENTRY,
+
+	GTT_TYPE_PPGTT_PDE_ENTRY,
+	GTT_TYPE_PPGTT_PDP_ENTRY,
+	GTT_TYPE_PPGTT_PML4_ENTRY,
+
+	GTT_TYPE_PPGTT_ROOT_ENTRY,
+
+	GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+	GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+
+	GTT_TYPE_PPGTT_ENTRY,
+
+	GTT_TYPE_PPGTT_PTE_PT,
+	GTT_TYPE_PPGTT_PDE_PT,
+	GTT_TYPE_PPGTT_PDP_PT,
+	GTT_TYPE_PPGTT_PML4_PT,
+
+	GTT_TYPE_MAX,
+} intel_gvt_gtt_type_t;
+
 struct intel_vgpu_mm {
 	int type;
 	bool initialized;
@@ -151,6 +181,12 @@ extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
 
 struct intel_vgpu_guest_page;
 
+struct intel_vgpu_scratch_pt {
+	struct page *page;
+	unsigned long page_mfn;
+};
+
+
 struct intel_vgpu_gtt {
 	struct intel_vgpu_mm *ggtt_mm;
 	unsigned long active_ppgtt_mm_bitmap;
@@ -160,8 +196,8 @@ struct intel_vgpu_gtt {
 	atomic_t n_write_protected_guest_page;
 	struct list_head oos_page_list_head;
 	struct list_head post_shadow_list_head;
-	struct page *scratch_page;
-	unsigned long scratch_page_mfn;
+	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
 };
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
@@ -1158,7 +1158,10 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
 static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	u32 mode = *(u32 *)p_data;
+	u32 mode;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	mode = vgpu_vreg(vgpu, offset);
 
 	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
 		WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n",
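dma_ctrl_write now follows the pattern used by the other handlers in this series: latch the guest's write into the virtual register file with write_vreg() first, then act on the complete register value read back through vgpu_vreg(). A toy standalone model of that shape (hypothetical mini vreg file, not the GVT functions):

/* handler-pattern sketch: commit the write, then decide on the full value */
#include <stdio.h>
#include <string.h>

static unsigned char vreg[4096];	/* toy virtual register file */

static void write_vreg(unsigned int offset, const void *p_data, unsigned int bytes)
{
	memcpy(&vreg[offset], p_data, bytes);		/* 1. latch the guest write */
}

static unsigned int vgpu_vreg(unsigned int offset)
{
	unsigned int val;
	memcpy(&val, &vreg[offset], sizeof(val));
	return val;
}

static int dma_ctrl_write(unsigned int offset, const void *p_data, unsigned int bytes)
{
	unsigned int mode;

	write_vreg(offset, p_data, bytes);
	mode = vgpu_vreg(offset);			/* 2. act on the readback */
	printf("mode after write: 0x%x\n", mode);
	return 0;
}

int main(void)
{
	unsigned int guest_write = 0x1;
	return dma_ctrl_write(0x20, &guest_write, sizeof(guest_write));
}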
@@ -1275,19 +1278,20 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
 	switch (offset) {
 	case 0x4ddc:
 		vgpu_vreg(vgpu, offset) = 0x8000003c;
+		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
+		if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER))
+			I915_WRITE(reg, vgpu_vreg(vgpu, offset));
 		break;
 	case 0x42080:
 		vgpu_vreg(vgpu, offset) = 0x8000;
+		/* WaCompressedResourceDisplayNewHashMode:skl */
+		if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER))
+			I915_WRITE(reg, vgpu_vreg(vgpu, offset));
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	/**
-	 * TODO: need detect stepping info after gvt contain such information
-	 * 0x4ddc enabled after C0, 0x42080 enabled after E0.
-	 */
-	I915_WRITE(reg, vgpu_vreg(vgpu, offset));
-
 	return 0;
 }
 
@@ -1367,6 +1371,8 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
 	int rc = 0;
 	unsigned int id = 0;
 
+	write_vreg(vgpu, offset, p_data, bytes);
+
 	switch (offset) {
 	case 0x4260:
 		id = RCS;
@@ -1392,6 +1398,23 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
 	return rc;
 }
 
+static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 data;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	data = vgpu_vreg(vgpu, offset);
+
+	if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+		data |= RESET_CTL_READY_TO_RESET;
+	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+		data &= ~RESET_CTL_READY_TO_RESET;
+
+	vgpu_vreg(vgpu, offset) = data;
+	return 0;
+}
+
 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
 	ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
 		f, s, am, rm, d, r, w); \
@@ -2298,6 +2321,15 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
 
+#define RING_REG(base) (base + 0xd0)
+	MMIO_RING_F(RING_REG, 4, F_RO, 0,
+		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+		ring_reset_ctl_write);
+	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
+		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+		ring_reset_ctl_write);
+#undef RING_REG
+
 #define RING_REG(base) (base + 0x230)
 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
 	MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
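ring_reset_ctl_write relies on the masked-bit convention of these registers: in i915, _MASKED_BIT_ENABLE(a) expands to ((a) << 16) | (a) and _MASKED_BIT_DISABLE(a) to ((a) << 16), so the upper 16 bits of a write select which of the lower 16 bits actually change. A standalone sketch of how the handler acknowledges a reset request, with hypothetical bit positions:

/* masked-bit sketch; macro shapes follow the i915 convention, bit positions invented */
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit, with its mask bit */
#define MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit, with its mask bit */

#define REQUEST_RESET	(1u << 0)	/* hypothetical positions for the sketch */
#define READY_TO_RESET	(1u << 1)

int main(void)
{
	unsigned int data = MASKED_BIT_ENABLE(REQUEST_RESET);	/* guest requests a reset */

	/* emulate the hardware acknowledging the request, as the handler does */
	if (data & MASKED_BIT_ENABLE(REQUEST_RESET))
		data |= READY_TO_RESET;
	else if (data & MASKED_BIT_DISABLE(REQUEST_RESET))
		data &= ~READY_TO_RESET;

	printf("vreg = 0x%x\n", data);	/* ready bit now visible to the guest */
	return 0;
}

Writing the result back into the vreg lets the guest poll the register and see RESET_CTL_READY_TO_RESET once it has asked for a reset.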
@@ -152,6 +152,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 
 	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
 		gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+	else
+		vgpu_vreg(vgpu, regs[ring_id]) = 0;
 
 	intel_uncore_forcewake_put(dev_priv, fw);
 
@@ -36,12 +36,10 @@
 
 static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 {
-	struct intel_vgpu_execlist *execlist;
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
 
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		execlist = &vgpu->execlist[i];
 		if (!list_empty(workload_q_head(vgpu, i)))
 			return true;
 	}
|
@ -455,6 +455,8 @@ static int workload_thread(void *priv)
|
|||||||
if (lret < 0) {
|
if (lret < 0) {
|
||||||
workload->status = lret;
|
workload->status = lret;
|
||||||
gvt_err("fail to wait workload, skip\n");
|
gvt_err("fail to wait workload, skip\n");
|
||||||
|
} else {
|
||||||
|
workload->status = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
complete:
|
complete:
|
||||||
|