/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if (size == 0)
		return vgpu_gmadr_is_valid(vgpu, addr);

	if (vgpu_gmadr_is_aperture(vgpu, addr) &&
	    vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
		return true;
	else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
		 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
		return true;

	gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
		   addr, size);
	return false;
}
/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
		     "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		     "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}
#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
		&& type != GTT_TYPE_PPGTT_PTE_ENTRY \
		&& type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - type of next level page table
 * - type of entry inside this level page table
 * - type of entry with PSE set
 *
 * If the given type doesn't carry a particular piece of information
 * (e.g. an L4 root entry has no PSE bit, and a PTE page table has no
 * next level page table), GTT_TYPE_INVALID is returned for that query.
 * This is useful when traversing a page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}
static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	/* We take IPS bit as 'PSE' for PTE level. */
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};
static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}

static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct intel_gt *gt)
{
	mmio_hw_access_pre(gt);
	intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(gt);
}

static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;

	writeq(pte, addr);
}
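/*
 * Low-level 64-bit GTT entry accessors. Depending on the arguments they
 * read/write a guest page table through a hypervisor GPA access, the host
 * GGTT through the MMIO-mapped GSM (pt == NULL), or a shadow page table
 * mapped in kernel memory (pt != NULL).
 */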
static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
#define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)

#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */

#define GTT_64K_PTE_STRIDE 16
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
		e->val64 &= ~ADDR_64K_MASK;
		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & _PAGE_PSE);
}

static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
{
	if (gen8_gtt_test_pse(e)) {
		switch (e->type) {
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			e->val64 &= ~_PAGE_PSE;
			e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
			break;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
			e->val64 &= ~_PAGE_PSE;
			break;
		default:
			WARN_ON(1);
		}
	}
}

static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return false;

	return !!(e->val64 & GEN8_PDE_IPS_64K);
}

static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return;

	e->val64 &= ~GEN8_PDE_IPS_64K;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without present bit,
	 * it also works, so we need to treat root pointer entry
	 * specifically.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & _PAGE_PRESENT);
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= _PAGE_PRESENT;
}

static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
}

static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
}

static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
}
/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.clear_pse = gen8_gtt_clear_pse,
	.clear_ips = gen8_gtt_clear_ips,
	.test_ips = gen8_gtt_test_ips,
	.clear_64k_splited = gen8_gtt_clear_64k_splited,
	.set_64k_splited = gen8_gtt_set_64k_splited,
	.test_64k_splited = gen8_gtt_test_64k_splited,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
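/*
 * The code below never touches PTE bits directly; it works through
 * vgpu->gvt->gtt.pte_ops and the gma_ops table, which on Gen8+ platforms
 * are backed by the gen8_* helpers above.
 */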
/* Update entry type per pse and ips bit. */
static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
	struct intel_gvt_gtt_entry *entry, bool ips)
{
	switch (entry->type) {
	case GTT_TYPE_PPGTT_PDE_ENTRY:
	case GTT_TYPE_PPGTT_PDP_ENTRY:
		if (pte_ops->test_pse(entry))
			entry->type = get_pse_type(entry->type);
		break;
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		if (ips)
			entry->type = get_pse_type(entry->type);
		break;
	default:
		GEM_BUG_ON(!gtt_type_is_entry(entry->type));
	}

	GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
}

/*
 * MM helpers.
 */
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
	update_entry_type_for_real(pte_ops, entry, false);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	update_entry_type_for_real(ops, e, guest ?
				   spt->guest_page.pde_ips : false);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)
static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);
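/*
 * Tear down one shadow page table page: unmap its DMA mapping, drop it
 * from the per-vGPU radix tree, detach any out-of-sync page and the write
 * protection on the guest page, then free the backing page.
 */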
static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;

	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);

	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
		       PCI_DMA_BIDIRECTIONAL);

	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);

	if (spt->guest_page.gfn) {
		if (spt->guest_page.oos_page)
			detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

		intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
	}

	list_del_init(&spt->post_shadow_list);
	free_spt(spt);
}

static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_ppgtt_spt *spt, *spn;
	struct radix_tree_iter iter;
	LIST_HEAD(all_spt);
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
		spt = radix_tree_deref_slot(slot);
		list_move(&spt->post_shadow_list, &all_spt);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
		ppgtt_free_spt(spt);
}
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(
		struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
	if (ret)
		return ret;
	return ret;
}

/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *track;

	track = intel_vgpu_find_page_track(vgpu, gfn);
	if (track && track->handler == ppgtt_write_protection_handler)
		return track->priv_data;

	return NULL;
}

/* Find the spt by shadow page mfn. */
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
}
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
	struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	dma_addr_t daddr;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * Init shadow_page.
	 */
	spt->shadow_page.type = type;
	daddr = dma_map_page(kdev, spt->shadow_page.page,
			     0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		ret = -EINVAL;
		goto err_free_spt;
	}
	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
	if (ret)
		goto err_unmap_dma;

	return spt;

err_unmap_dma:
	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
	free_spt(spt);
	return ERR_PTR(ret);
}

/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
		unsigned long gfn, bool guest_pde_ips)
{
	struct intel_vgpu_ppgtt_spt *spt;
	int ret;

	spt = ppgtt_alloc_spt(vgpu, type);
	if (IS_ERR(spt))
		return spt;

	/*
	 * Init guest_page.
	 */
	ret = intel_vgpu_register_page_track(vgpu, gfn,
			ppgtt_write_protection_handler, spt);
	if (ret) {
		ppgtt_free_spt(spt);
		return ERR_PTR(ret);
	}

	spt->guest_page.type = type;
	spt->guest_page.gfn = gfn;
	spt->guest_page.pde_ips = guest_pde_ips;

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);

	return spt;
}
#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
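/*
 * Iterators over page table entries. When a PDE has IPS (64K pages)
 * enabled, valid entries only occupy every 16th slot, so the iterators
 * advance by GTT_64K_PTE_STRIDE in that case.
 */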
#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
		if (!ppgtt_get_shadow_entry(spt, e, i))
static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
	atomic_inc(&spt->refcount);
}

static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
	return atomic_dec_return(&spt->refcount);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
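/*
 * Given a shadow entry that points at a lower-level shadow page table,
 * look that table up by the mfn stored in the entry and release it.
 * Entries that still point at the per-type scratch page are skipped.
 */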
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	enum intel_gvt_gtt_type cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type);

		if (!gtt_type_is_pt(cur_pt_type) ||
				!gtt_type_is_pt(cur_pt_type + 1)) {
			drm_WARN(&i915->drm, 1,
				 "Invalid page table type, cur_pt_type is: %d\n",
				 cur_pt_type);
			return -EINVAL;
		}

		cur_pt_type += 1;

		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_spt(s);
}
static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;
	int type;

	pfn = ops->get_pfn(entry);
	type = spt->shadow_page.type;

	/* Uninitialized spte or unshadowed spte. */
	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
		return;

	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
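/*
 * Drop one reference on a shadow page table page. When the last reference
 * goes away, walk its present shadow entries, unmap direct mappings or
 * recursively invalidate child tables, and free the page.
 */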
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;

	trace_spt_change(spt->vgpu->id, "die", spt,
			spt->guest_page.gfn, spt->shadow_page.type);

	if (ppgtt_put_spt(spt) > 0)
		return 0;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			ppgtt_invalidate_pte(spt, &e);
			break;
		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
			/* We don't setup 64K shadow entry so far. */
			WARN(1, "suspicious 64K gtt entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			gvt_vdbg_mm("invalidate 2M entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PMUL4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_spt_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}

	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_spt(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

	if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
			GAMW_ECO_ENABLE_64K_IPS_FIELD;

		return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		/* 64K paging only controlled by IPS bit in PTE now. */
		return true;
	} else
		return false;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
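/*
 * Find or create the shadow page table that backs a guest page-table
 * entry. An existing shadow page is reused with an extra reference; if
 * the guest toggled the IPS bit since it was shadowed, the page is
 * cleared and re-populated.
 */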
static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	bool ips = false;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);

	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
	if (spt) {
		ppgtt_get_spt(spt);

		if (ips != spt->guest_page.pde_ips) {
			spt->guest_page.pde_ips = ips;

			gvt_dbg_mm("reshadow PDE since ips changed\n");
			clear_page(spt->shadow_page.vaddr);
			ret = ppgtt_populate_spt(spt);
			if (ret) {
				ppgtt_put_spt(spt);
				goto err;
			}
		}
	} else {
		int type = get_next_pt_type(we->type);

		if (!gtt_type_is_pt(type)) {
			ret = -EINVAL;
			goto err;
		}

		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
		if (IS_ERR(spt)) {
			ret = PTR_ERR(spt);
			goto err;
		}

		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
		if (ret)
			goto err_free_spt;

		ret = ppgtt_populate_spt(spt);
		if (ret)
			goto err_free_spt;

		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
				 spt->shadow_page.type);
	}
	return spt;

err_free_spt:
	ppgtt_free_spt(spt);
	spt = NULL;
err:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ERR_PTR(ret);
}
static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	/* Because we always split 64KB pages, so clear IPS in shadow PDE. */
	if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ops->clear_ips(se);

	ops->set_pfn(se, s->shadow_page.mfn);
}
/**
 * Check whether a 2MB huge page can be used
 * @vgpu: target vgpu
 * @entry: target pfn's gtt entry
 *
 * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
 * are not met, negative on error.
 */
static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
	struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
		return 0;

	pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
	if (pfn == INTEL_GVT_INVALID_ADDR)
		return -EINVAL;

	return PageTransHuge(pfn_to_page(pfn));
}
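/*
 * When a guest 2MB page cannot be backed by a host huge page, shadow it
 * through an extra PTE-level page table whose 4K entries each map one
 * page of the guest range.
 */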
static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *sub_spt;
	struct intel_gvt_gtt_entry sub_se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	unsigned long sub_index;
	int ret;

	gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);

	start_gfn = ops->get_pfn(se);

	sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
	if (IS_ERR(sub_spt))
		return PTR_ERR(sub_spt);

	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
		if (ret) {
			ppgtt_invalidate_spt(spt);
			return ret;
		}
		sub_se.val64 = se->val64;

		/* Copy the PAT field from PDE. */
		sub_se.val64 &= ~_PAGE_PAT;
		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;

		ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
	}

	/* Clear dirty field. */
	se->val64 &= ~_PAGE_DIRTY;

	ops->clear_pse(se);
	ops->clear_ips(se);
	ops->set_pfn(se, sub_spt->shadow_page.mfn);
	ppgtt_set_shadow_entry(spt, se, index);
	return 0;
}
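/*
 * A guest 64K PTE is shadowed as 16 consecutive 4K shadow PTEs, each
 * tagged with the 64K_SPLITED flag so the write handlers know they
 * belong to one guest entry.
 */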
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = *se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	int i, ret;

	gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);

	GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);

	start_gfn = ops->get_pfn(se);

	entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
	ops->set_64k_splited(&entry);

	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
					start_gfn + i, PAGE_SIZE, &dma_addr);
		if (ret)
			return ret;

		ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(spt, &entry, index + i);
	}
	return 0;
}
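/*
 * Shadow a single leaf guest entry: pin the guest page(s) through the
 * hypervisor to get a DMA address and write the resulting shadow PTE.
 * 64K entries, and 2M entries when host huge pages are unavailable, are
 * split into 4K shadow entries instead.
 */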
static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn, page_size = PAGE_SIZE;
	dma_addr_t dma_addr;
	int ret;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		break;
	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
		gvt_vdbg_mm("shadow 64K gtt entry\n");
		/*
		 * The layout of a 64K page is special: the page size is
		 * controlled by the upper PDE. To keep things simple, we
		 * always split a 64K page into smaller 4K pages in the
		 * shadow PT.
		 */
		return split_64KB_gtt_entry(vgpu, spt, index, &se);
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		gvt_vdbg_mm("shadow 2M gtt entry\n");
		ret = is_2MB_gtt_possible(vgpu, ge);
		if (ret == 0)
			return split_2MB_gtt_entry(vgpu, spt, index, &se);
		else if (ret < 0)
			return ret;
		page_size = I915_GTT_PAGE_SIZE_2M;
		break;
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
	}

	/* direct shadow */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
						      &dma_addr);
	if (ret)
		return -ENXIO;

	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}
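/*
 * Walk all present guest entries of a page table: non-leaf entries pull
 * in (or create) child shadow tables, leaf entries are shadowed directly,
 * and guest frames that fail validation are pointed at the scratch page.
 */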
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
|
|
|
struct intel_vgpu *vgpu = spt->vgpu;
|
2017-12-22 17:06:31 +07:00
|
|
|
struct intel_gvt *gvt = vgpu->gvt;
|
|
|
|
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
2016-03-28 22:23:16 +07:00
|
|
|
struct intel_vgpu_ppgtt_spt *s;
|
|
|
|
struct intel_gvt_gtt_entry se, ge;
|
2017-12-22 17:06:31 +07:00
|
|
|
unsigned long gfn, i;
|
2016-03-28 22:23:16 +07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
trace_spt_change(spt->vgpu->id, "born", spt,
|
2018-01-30 18:19:53 +07:00
|
|
|
spt->guest_page.gfn, spt->shadow_page.type);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:48 +07:00
|
|
|
for_each_present_guest_entry(spt, &ge, i) {
|
|
|
|
if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
|
2018-01-30 18:19:50 +07:00
|
|
|
s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
|
2018-01-30 18:19:48 +07:00
|
|
|
if (IS_ERR(s)) {
|
|
|
|
ret = PTR_ERR(s);
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
ppgtt_get_shadow_entry(spt, &se, i);
|
|
|
|
ppgtt_generate_shadow_entry(&se, s, &ge);
|
|
|
|
ppgtt_set_shadow_entry(spt, &se, i);
|
|
|
|
} else {
|
2017-12-22 17:06:31 +07:00
|
|
|
gfn = ops->get_pfn(&ge);
|
2018-01-30 18:19:48 +07:00
|
|
|
if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
|
2017-12-22 17:06:31 +07:00
|
|
|
ops->set_pfn(&se, gvt->gtt.scratch_mfn);
|
2018-01-30 18:19:48 +07:00
|
|
|
ppgtt_set_shadow_entry(spt, &se, i);
|
|
|
|
continue;
|
|
|
|
}
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:48 +07:00
|
|
|
ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
|
|
|
|
if (ret)
|
|
|
|
goto fail;
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
fail:
|
2017-03-10 16:26:53 +07:00
|
|
|
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
|
|
|
|
spt, ge.val64, ge.type);
|
2016-03-28 22:23:16 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
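/*
 * Drop the shadow state behind a guest entry that is being removed:
 * invalidate the child shadow page table for non-leaf entries, or unmap
 * the leaf page otherwise.
 */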
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
|
2017-08-14 14:24:14 +07:00
|
|
|
struct intel_gvt_gtt_entry *se, unsigned long index)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
|
|
|
struct intel_vgpu *vgpu = spt->vgpu;
|
|
|
|
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
|
|
|
int ret;
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
trace_spt_guest_change(spt->vgpu->id, "remove", spt,
|
|
|
|
spt->shadow_page.type, se->val64, index);
|
2016-11-07 09:44:36 +07:00
|
|
|
|
2018-01-30 18:19:44 +07:00
|
|
|
gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
|
|
|
|
se->type, index, se->val64);
|
|
|
|
|
2017-08-14 14:24:14 +07:00
|
|
|
if (!ops->test_present(se))
|
2016-03-28 22:23:16 +07:00
|
|
|
return 0;
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
if (ops->get_pfn(se) ==
|
|
|
|
vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
|
2016-03-28 22:23:16 +07:00
|
|
|
return 0;
|
|
|
|
|
2017-08-14 14:24:14 +07:00
|
|
|
if (gtt_type_is_pt(get_next_pt_type(se->type))) {
|
2016-11-07 09:44:36 +07:00
|
|
|
struct intel_vgpu_ppgtt_spt *s =
|
2018-01-30 18:19:49 +07:00
|
|
|
intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
|
2016-11-07 09:44:36 +07:00
|
|
|
if (!s) {
|
2017-03-10 16:26:53 +07:00
|
|
|
gvt_vgpu_err("fail to find guest page\n");
|
2016-03-28 22:23:16 +07:00
|
|
|
ret = -ENXIO;
|
|
|
|
goto fail;
|
|
|
|
}
|
2018-01-30 18:19:50 +07:00
|
|
|
ret = ppgtt_invalidate_spt(s);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
goto fail;
|
2018-05-15 09:35:41 +07:00
|
|
|
} else {
|
|
|
|
/* We don't set up 64K shadow entries so far. */
|
|
|
|
WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
|
|
|
|
"suspicious 64K entry\n");
|
2018-03-01 14:49:59 +07:00
|
|
|
ppgtt_invalidate_pte(spt, se);
|
2018-05-15 09:35:41 +07:00
|
|
|
}
|
2018-03-01 14:49:59 +07:00
|
|
|
|
2016-03-28 22:23:16 +07:00
|
|
|
return 0;
|
|
|
|
fail:
|
2017-03-10 16:26:53 +07:00
|
|
|
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
|
2017-08-14 14:24:14 +07:00
|
|
|
spt, se->val64, se->type);
|
2016-03-28 22:23:16 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
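/*
 * Shadow a guest entry that is being added: populate a child shadow page
 * table for non-leaf entries, or shadow the leaf entry directly.
 */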
static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
|
2016-03-28 22:23:16 +07:00
|
|
|
struct intel_gvt_gtt_entry *we, unsigned long index)
|
|
|
|
{
|
|
|
|
struct intel_vgpu *vgpu = spt->vgpu;
|
|
|
|
struct intel_gvt_gtt_entry m;
|
|
|
|
struct intel_vgpu_ppgtt_spt *s;
|
|
|
|
int ret;
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
|
|
|
|
we->val64, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:44 +07:00
|
|
|
gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
|
|
|
|
we->type, index, we->val64);
|
|
|
|
|
2016-03-28 22:23:16 +07:00
|
|
|
if (gtt_type_is_pt(get_next_pt_type(we->type))) {
|
2018-01-30 18:19:50 +07:00
|
|
|
s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (IS_ERR(s)) {
|
|
|
|
ret = PTR_ERR(s);
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
ppgtt_get_shadow_entry(spt, &m, index);
|
|
|
|
ppgtt_generate_shadow_entry(&m, s, we);
|
|
|
|
ppgtt_set_shadow_entry(spt, &m, index);
|
|
|
|
} else {
|
2018-01-30 18:19:48 +07:00
|
|
|
ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
fail:
|
2017-03-10 16:26:53 +07:00
|
|
|
gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
|
|
|
|
spt, we->val64, we->type);
|
2016-03-28 22:23:16 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
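/*
 * Bring an out-of-sync guest page table back in sync: compare the cached
 * snapshot in oos_page->mem with the current guest entries and re-shadow
 * only the entries that changed.
 */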
static int sync_oos_page(struct intel_vgpu *vgpu,
|
|
|
|
struct intel_vgpu_oos_page *oos_page)
|
|
|
|
{
|
|
|
|
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
|
|
|
|
struct intel_gvt *gvt = vgpu->gvt;
|
|
|
|
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
2018-01-30 18:19:49 +07:00
|
|
|
struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
|
2018-01-30 18:19:48 +07:00
|
|
|
struct intel_gvt_gtt_entry old, new;
|
2016-03-28 22:23:16 +07:00
|
|
|
int index;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
trace_oos_change(vgpu->id, "sync", oos_page->id,
|
2018-01-30 18:19:49 +07:00
|
|
|
spt, spt->guest_page.type);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
old.type = new.type = get_entry_type(spt->guest_page.type);
|
2016-03-28 22:23:16 +07:00
|
|
|
old.val64 = new.val64 = 0;
|
|
|
|
|
2017-10-10 12:51:32 +07:00
|
|
|
for (index = 0; index < (I915_GTT_PAGE_SIZE >>
|
|
|
|
info->gtt_entry_size_shift); index++) {
|
2016-03-28 22:23:16 +07:00
|
|
|
ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
|
|
|
|
ops->get_entry(NULL, &new, index, true,
|
2018-01-30 18:19:49 +07:00
|
|
|
spt->guest_page.gfn << PAGE_SHIFT, vgpu);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
if (old.val64 == new.val64
|
|
|
|
&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
trace_oos_sync(vgpu->id, oos_page->id,
|
2018-01-30 18:19:49 +07:00
|
|
|
spt, spt->guest_page.type,
|
2016-03-28 22:23:16 +07:00
|
|
|
new.val64, index);
|
|
|
|
|
2018-01-30 18:19:48 +07:00
|
|
|
ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
spt->guest_page.write_cnt = 0;
|
2016-03-28 22:23:16 +07:00
|
|
|
list_del_init(&spt->post_shadow_list);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int detach_oos_page(struct intel_vgpu *vgpu,
|
|
|
|
struct intel_vgpu_oos_page *oos_page)
|
|
|
|
{
|
|
|
|
struct intel_gvt *gvt = vgpu->gvt;
|
2018-01-30 18:19:49 +07:00
|
|
|
struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
trace_oos_change(vgpu->id, "detach", oos_page->id,
|
2018-01-30 18:19:49 +07:00
|
|
|
spt, spt->guest_page.type);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
spt->guest_page.write_cnt = 0;
|
|
|
|
spt->guest_page.oos_page = NULL;
|
|
|
|
oos_page->spt = NULL;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
list_del_init(&oos_page->vm_list);
|
|
|
|
list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
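/*
 * Attach an oos page to a shadow page table and snapshot the guest page
 * table content into oos_page->mem.
 */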
static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
|
|
|
|
struct intel_vgpu_ppgtt_spt *spt)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
2018-01-30 18:19:49 +07:00
|
|
|
struct intel_gvt *gvt = spt->vgpu->gvt;
|
2016-03-28 22:23:16 +07:00
|
|
|
int ret;
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
|
|
|
|
spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
|
2017-10-10 12:51:32 +07:00
|
|
|
oos_page->mem, I915_GTT_PAGE_SIZE);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
oos_page->spt = spt;
|
|
|
|
spt->guest_page.oos_page = oos_page;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
|
|
|
|
spt, spt->guest_page.type);
|
2016-03-28 22:23:16 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
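/*
 * Switch an out-of-sync guest page table back to the synced state:
 * re-enable write protection and re-sync its entries.
 */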
static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
2018-01-30 18:19:49 +07:00
|
|
|
struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
|
2016-03-28 22:23:16 +07:00
|
|
|
int ret;
|
|
|
|
|
2018-01-30 18:19:53 +07:00
|
|
|
ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
|
|
|
|
spt, spt->guest_page.type);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
list_del_init(&oos_page->vm_list);
|
|
|
|
return sync_oos_page(spt->vgpu, oos_page);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
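/*
 * Find an oos page for a shadow page table: take one from the free list,
 * or sync and detach the oldest in-use one when the free list is empty.
 */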
static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
2018-01-30 18:19:49 +07:00
|
|
|
struct intel_gvt *gvt = spt->vgpu->gvt;
|
2016-03-28 22:23:16 +07:00
|
|
|
struct intel_gvt_gtt *gtt = &gvt->gtt;
|
2018-01-30 18:19:49 +07:00
|
|
|
struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
|
2016-03-28 22:23:16 +07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
WARN(oos_page, "shadow PPGTT page already has an oos page\n");
|
|
|
|
|
|
|
|
if (list_empty(>t->oos_page_free_list_head)) {
|
|
|
|
oos_page = container_of(gtt->oos_page_use_list_head.next,
|
|
|
|
struct intel_vgpu_oos_page, list);
|
2018-01-30 18:19:49 +07:00
|
|
|
ret = ppgtt_set_guest_page_sync(oos_page->spt);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2018-01-30 18:19:49 +07:00
|
|
|
ret = detach_oos_page(spt->vgpu, oos_page);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
} else
|
|
|
|
oos_page = container_of(gtt->oos_page_free_list_head.next,
|
|
|
|
struct intel_vgpu_oos_page, list);
|
2018-01-30 18:19:49 +07:00
|
|
|
return attach_oos_page(oos_page, spt);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
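/*
 * Mark a guest page table as out of sync: stop write-protecting it and put
 * it on the vGPU's oos page list so it is synced again before the next
 * workload is submitted.
 */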
static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
2018-01-30 18:19:49 +07:00
|
|
|
struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
|
|
|
|
spt, spt->guest_page.type);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
|
2018-01-30 18:19:53 +07:00
|
|
|
return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
|
|
|
|
* @vgpu: a vGPU
|
|
|
|
*
|
|
|
|
* This function is called before submitting a guest workload to host,
|
|
|
|
* to sync all the out-of-sync shadow pages for the vGPU
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* Zero on success, negative error code if failed.
|
|
|
|
*/
|
|
|
|
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
|
|
|
|
{
|
|
|
|
struct list_head *pos, *n;
|
|
|
|
struct intel_vgpu_oos_page *oos_page;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!enable_out_of_sync)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
|
|
|
|
oos_page = container_of(pos,
|
|
|
|
struct intel_vgpu_oos_page, vm_list);
|
2018-01-30 18:19:49 +07:00
|
|
|
ret = ppgtt_set_guest_page_sync(oos_page->spt);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The heart of PPGTT shadow page table.
|
|
|
|
*/
|
|
|
|
static int ppgtt_handle_guest_write_page_table(
|
2018-01-30 18:19:49 +07:00
|
|
|
struct intel_vgpu_ppgtt_spt *spt,
|
2016-03-28 22:23:16 +07:00
|
|
|
struct intel_gvt_gtt_entry *we, unsigned long index)
|
|
|
|
{
|
|
|
|
struct intel_vgpu *vgpu = spt->vgpu;
|
2017-08-14 14:24:14 +07:00
|
|
|
int type = spt->shadow_page.type;
|
2016-03-28 22:23:16 +07:00
|
|
|
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
2018-01-30 18:19:48 +07:00
|
|
|
struct intel_gvt_gtt_entry old_se;
|
2016-11-07 09:44:36 +07:00
|
|
|
int new_present;
|
2018-05-15 09:35:41 +07:00
|
|
|
int i, ret;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
new_present = ops->test_present(we);
|
|
|
|
|
2017-08-14 14:24:14 +07:00
|
|
|
/*
|
|
|
|
* Add the new entry first and then remove the old one, which
|
|
|
|
* guarantees the ppgtt table stays valid during the window between
|
|
|
|
* adding and removal.
|
|
|
|
*/
|
2018-01-30 18:19:48 +07:00
|
|
|
ppgtt_get_shadow_entry(spt, &old_se, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
if (new_present) {
|
2018-01-30 18:19:49 +07:00
|
|
|
ret = ppgtt_handle_guest_entry_add(spt, we, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
goto fail;
|
|
|
|
}
|
2017-08-14 14:24:14 +07:00
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
|
2017-08-14 14:24:14 +07:00
|
|
|
if (ret)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
if (!new_present) {
|
2018-05-15 09:35:41 +07:00
|
|
|
/* For 64KB split entries, we need to clear them all. */
|
|
|
|
if (ops->test_64k_splited(&old_se) &&
|
|
|
|
!(index % GTT_64K_PTE_STRIDE)) {
|
|
|
|
gvt_vdbg_mm("remove splited 64K shadow entries\n");
|
|
|
|
for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
|
|
|
|
ops->clear_64k_splited(&old_se);
|
|
|
|
ops->set_pfn(&old_se,
|
|
|
|
vgpu->gtt.scratch_pt[type].page_mfn);
|
|
|
|
ppgtt_set_shadow_entry(spt, &old_se, index + i);
|
|
|
|
}
|
2018-05-15 09:35:43 +07:00
|
|
|
} else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
|
|
|
|
old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
|
|
|
|
ops->clear_pse(&old_se);
|
|
|
|
ops->set_pfn(&old_se,
|
|
|
|
vgpu->gtt.scratch_pt[type].page_mfn);
|
|
|
|
ppgtt_set_shadow_entry(spt, &old_se, index);
|
2018-05-15 09:35:41 +07:00
|
|
|
} else {
|
|
|
|
ops->set_pfn(&old_se,
|
|
|
|
vgpu->gtt.scratch_pt[type].page_mfn);
|
|
|
|
ppgtt_set_shadow_entry(spt, &old_se, index);
|
|
|
|
}
|
2017-08-14 14:24:14 +07:00
|
|
|
}
|
|
|
|
|
2016-03-28 22:23:16 +07:00
|
|
|
return 0;
|
|
|
|
fail:
|
2017-03-10 16:26:53 +07:00
|
|
|
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
|
|
|
|
spt, we->val64, we->type);
|
2016-03-28 22:23:16 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:48 +07:00
|
|
|
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
|
|
|
return enable_out_of_sync
|
2018-01-30 18:19:49 +07:00
|
|
|
&& gtt_type_is_pte_pt(spt->guest_page.type)
|
|
|
|
&& spt->guest_page.write_cnt >= 2;
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
|
|
|
|
unsigned long index)
|
|
|
|
{
|
|
|
|
set_bit(index, spt->post_shadow_bitmap);
|
|
|
|
if (!list_empty(&spt->post_shadow_list))
|
|
|
|
return;
|
|
|
|
|
|
|
|
list_add_tail(&spt->post_shadow_list,
|
|
|
|
&spt->vgpu->gtt.post_shadow_list_head);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* intel_vgpu_flush_post_shadow - flush the post shadow transactions
|
|
|
|
* @vgpu: a vGPU
|
|
|
|
*
|
|
|
|
* This function is called before submitting a guest workload to host,
|
|
|
|
* to flush all the post shadows for a vGPU.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* Zero on success, negative error code if failed.
|
|
|
|
*/
|
|
|
|
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
|
|
|
|
{
|
|
|
|
struct list_head *pos, *n;
|
|
|
|
struct intel_vgpu_ppgtt_spt *spt;
|
2016-11-07 09:44:36 +07:00
|
|
|
struct intel_gvt_gtt_entry ge;
|
2016-03-28 22:23:16 +07:00
|
|
|
unsigned long index;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
|
|
|
|
spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
|
|
|
|
post_shadow_list);
|
|
|
|
|
|
|
|
for_each_set_bit(index, spt->post_shadow_bitmap,
|
|
|
|
GTT_ENTRY_NUM_IN_ONE_PAGE) {
|
|
|
|
ppgtt_get_guest_entry(spt, &ge, index);
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
ret = ppgtt_handle_guest_write_page_table(spt,
|
|
|
|
&ge, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
clear_bit(index, spt->post_shadow_bitmap);
|
|
|
|
}
|
|
|
|
list_del_init(&spt->post_shadow_list);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-09-29 01:47:55 +07:00
|
|
|
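/*
 * Handle a guest write to a shadowed page table at byte granularity:
 * full-entry writes are shadowed immediately, partial writes are staged via
 * the post-shadow bitmap and flushed before the next workload submission,
 * and, when enabled, repeatedly written PTE pages may be moved to
 * out-of-sync mode.
 */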
static int ppgtt_handle_guest_write_page_table_bytes(
|
2018-01-30 18:19:49 +07:00
|
|
|
struct intel_vgpu_ppgtt_spt *spt,
|
2016-03-28 22:23:16 +07:00
|
|
|
u64 pa, void *p_data, int bytes)
|
|
|
|
{
|
|
|
|
struct intel_vgpu *vgpu = spt->vgpu;
|
|
|
|
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
|
|
|
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
|
2017-08-14 14:24:14 +07:00
|
|
|
struct intel_gvt_gtt_entry we, se;
|
2016-03-28 22:23:16 +07:00
|
|
|
unsigned long index;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
|
|
|
|
|
|
|
|
ppgtt_get_guest_entry(spt, &we, index);
|
|
|
|
|
2018-05-15 09:35:41 +07:00
|
|
|
/*
|
|
|
|
* For a page table which has 64K gtt entries, only PTE#0, PTE#16,
|
|
|
|
* PTE#32, ... PTE#496 are used. Updates to unused PTEs should be
|
|
|
|
* ignored.
|
|
|
|
*/
|
|
|
|
if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
|
|
|
|
(index % GTT_64K_PTE_STRIDE)) {
|
|
|
|
gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
|
|
|
|
index);
|
|
|
|
return 0;
|
|
|
|
}
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
if (bytes == info->gtt_entry_size) {
|
2018-01-30 18:19:49 +07:00
|
|
|
ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
} else {
|
|
|
|
if (!test_bit(index, spt->post_shadow_bitmap)) {
|
2017-12-29 01:50:08 +07:00
|
|
|
int type = spt->shadow_page.type;
|
|
|
|
|
2017-08-14 14:24:14 +07:00
|
|
|
ppgtt_get_shadow_entry(spt, &se, index);
|
2018-01-30 18:19:49 +07:00
|
|
|
ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-12-29 01:50:08 +07:00
|
|
|
ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
|
|
|
|
ppgtt_set_shadow_entry(spt, &se, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
ppgtt_set_post_shadow(spt, index);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!enable_out_of_sync)
|
|
|
|
return 0;
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
spt->guest_page.write_cnt++;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
if (spt->guest_page.oos_page)
|
|
|
|
ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
|
2016-03-28 22:23:16 +07:00
|
|
|
false, 0, vgpu);
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
if (can_do_out_of_sync(spt)) {
|
|
|
|
if (!spt->guest_page.oos_page)
|
|
|
|
ppgtt_allocate_oos_page(spt);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
ret = ppgtt_set_guest_page_oos(spt);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
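/*
 * Tear down the shadow of a PPGTT mm by invalidating every present shadow
 * root entry.
 */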
static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
|
|
|
struct intel_vgpu *vgpu = mm->vgpu;
|
|
|
|
struct intel_gvt *gvt = vgpu->gvt;
|
|
|
|
struct intel_gvt_gtt *gtt = &gvt->gtt;
|
|
|
|
struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
|
|
|
|
struct intel_gvt_gtt_entry se;
|
2018-01-30 18:19:40 +07:00
|
|
|
int index;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
if (!mm->ppgtt_mm.shadowed)
|
2016-03-28 22:23:16 +07:00
|
|
|
return;
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
|
|
|
|
ppgtt_get_shadow_root_entry(mm, &se, index);
|
|
|
|
|
2016-03-28 22:23:16 +07:00
|
|
|
if (!ops->test_present(&se))
|
|
|
|
continue;
|
2018-01-30 18:19:40 +07:00
|
|
|
|
2018-01-30 18:19:50 +07:00
|
|
|
ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
|
2016-03-28 22:23:16 +07:00
|
|
|
se.val64 = 0;
|
2018-01-30 18:19:40 +07:00
|
|
|
ppgtt_set_shadow_root_entry(mm, &se, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
trace_spt_guest_change(vgpu->id, "destroy root pointer",
|
|
|
|
NULL, se.type, se.val64, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
mm->ppgtt_mm.shadowed = false;
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
|
|
|
|
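/*
 * Build the shadow page tables of a PPGTT mm from its present guest root
 * entries; on failure any partially built shadow is torn down again.
 */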
static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
|
|
|
struct intel_vgpu *vgpu = mm->vgpu;
|
|
|
|
struct intel_gvt *gvt = vgpu->gvt;
|
|
|
|
struct intel_gvt_gtt *gtt = &gvt->gtt;
|
|
|
|
struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
|
|
|
|
struct intel_vgpu_ppgtt_spt *spt;
|
|
|
|
struct intel_gvt_gtt_entry ge, se;
|
2018-01-30 18:19:40 +07:00
|
|
|
int index, ret;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
if (mm->ppgtt_mm.shadowed)
|
2016-03-28 22:23:16 +07:00
|
|
|
return 0;
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
mm->ppgtt_mm.shadowed = true;
|
|
|
|
|
|
|
|
for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
|
|
|
|
ppgtt_get_guest_root_entry(mm, &ge, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
if (!ops->test_present(&ge))
|
|
|
|
continue;
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
trace_spt_guest_change(vgpu->id, __func__, NULL,
|
|
|
|
ge.type, ge.val64, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:50 +07:00
|
|
|
spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (IS_ERR(spt)) {
|
2017-03-10 16:26:53 +07:00
|
|
|
gvt_vgpu_err("fail to populate guest root pointer\n");
|
2016-03-28 22:23:16 +07:00
|
|
|
ret = PTR_ERR(spt);
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
ppgtt_generate_shadow_entry(&se, spt, &ge);
|
2018-01-30 18:19:40 +07:00
|
|
|
ppgtt_set_shadow_root_entry(mm, &se, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
trace_spt_guest_change(vgpu->id, "populate root pointer",
|
|
|
|
NULL, se.type, se.val64, index);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
2018-01-30 18:19:40 +07:00
|
|
|
|
2016-03-28 22:23:16 +07:00
|
|
|
return 0;
|
|
|
|
fail:
|
2018-01-30 18:19:40 +07:00
|
|
|
invalidate_ppgtt_mm(mm);
|
2016-03-28 22:23:16 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
|
|
|
|
{
|
|
|
|
struct intel_vgpu_mm *mm;
|
|
|
|
|
|
|
|
mm = kzalloc(sizeof(*mm), GFP_KERNEL);
|
|
|
|
if (!mm)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
mm->vgpu = vgpu;
|
|
|
|
kref_init(&mm->ref);
|
|
|
|
atomic_set(&mm->pincount, 0);
|
|
|
|
|
|
|
|
return mm;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void vgpu_free_mm(struct intel_vgpu_mm *mm)
|
|
|
|
{
|
|
|
|
kfree(mm);
|
|
|
|
}
|
|
|
|
|
2016-03-28 22:23:16 +07:00
|
|
|
/**
|
2018-01-30 18:19:40 +07:00
|
|
|
* intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
|
2016-03-28 22:23:16 +07:00
|
|
|
* @vgpu: a vGPU
|
2018-01-30 18:19:40 +07:00
|
|
|
* @root_entry_type: ppgtt root entry type
|
|
|
|
* @pdps: guest pdps.
|
2016-03-28 22:23:16 +07:00
|
|
|
*
|
2018-01-30 18:19:40 +07:00
|
|
|
* This function is used to create a ppgtt mm object for a vGPU.
|
2016-03-28 22:23:16 +07:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* The created ppgtt mm object on success, an error pointer if failed.
|
|
|
|
*/
|
2018-01-30 18:19:40 +07:00
|
|
|
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
|
2019-04-23 19:04:08 +07:00
|
|
|
enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
|
|
|
struct intel_gvt *gvt = vgpu->gvt;
|
|
|
|
struct intel_vgpu_mm *mm;
|
|
|
|
int ret;
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
mm = vgpu_alloc_mm(vgpu);
|
|
|
|
if (!mm)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
mm->type = INTEL_GVT_MM_PPGTT;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
|
|
|
|
root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
|
|
|
|
mm->ppgtt_mm.root_entry_type = root_entry_type;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
INIT_LIST_HEAD(&mm->ppgtt_mm.list);
|
|
|
|
INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
|
2020-05-08 10:14:09 +07:00
|
|
|
INIT_LIST_HEAD(&mm->ppgtt_mm.link);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
|
|
|
|
mm->ppgtt_mm.guest_pdps[0] = pdps[0];
|
|
|
|
else
|
|
|
|
memcpy(mm->ppgtt_mm.guest_pdps, pdps,
|
|
|
|
sizeof(mm->ppgtt_mm.guest_pdps));
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
ret = shadow_ppgtt_mm(mm);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret) {
|
2018-01-30 18:19:40 +07:00
|
|
|
gvt_vgpu_err("failed to shadow ppgtt mm\n");
|
|
|
|
vgpu_free_mm(mm);
|
|
|
|
return ERR_PTR(ret);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
|
2019-03-01 14:04:13 +07:00
|
|
|
|
|
|
|
mutex_lock(&gvt->gtt.ppgtt_mm_lock);
|
2018-01-30 18:19:40 +07:00
|
|
|
list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
|
2019-03-01 14:04:13 +07:00
|
|
|
mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
return mm;
|
|
|
|
}
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
|
|
|
|
{
|
|
|
|
struct intel_vgpu_mm *mm;
|
|
|
|
unsigned long nr_entries;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
mm = vgpu_alloc_mm(vgpu);
|
|
|
|
if (!mm)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
mm->type = INTEL_GVT_MM_GGTT;
|
|
|
|
|
|
|
|
nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
|
2018-06-13 04:27:37 +07:00
|
|
|
mm->ggtt_mm.virtual_ggtt =
|
|
|
|
vzalloc(array_size(nr_entries,
|
|
|
|
vgpu->gvt->device_info.gtt_entry_size));
|
2018-01-30 18:19:40 +07:00
|
|
|
if (!mm->ggtt_mm.virtual_ggtt) {
|
|
|
|
vgpu_free_mm(mm);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
2018-01-30 18:19:40 +07:00
|
|
|
|
2016-03-28 22:23:16 +07:00
|
|
|
return mm;
|
2018-01-30 18:19:40 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2018-01-30 18:19:41 +07:00
|
|
|
* _intel_vgpu_mm_release - destroy a mm object
|
2018-01-30 18:19:40 +07:00
|
|
|
* @mm_ref: a kref object
|
|
|
|
*
|
|
|
|
* This function is used to destroy a mm object for vGPU
|
|
|
|
*
|
|
|
|
*/
|
2018-01-30 18:19:41 +07:00
|
|
|
void _intel_vgpu_mm_release(struct kref *mm_ref)
|
2018-01-30 18:19:40 +07:00
|
|
|
{
|
|
|
|
struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
|
|
|
|
|
|
|
|
if (GEM_WARN_ON(atomic_read(&mm->pincount)))
|
|
|
|
gvt_err("vgpu mm pin count bug detected\n");
|
|
|
|
|
|
|
|
if (mm->type == INTEL_GVT_MM_PPGTT) {
|
|
|
|
list_del(&mm->ppgtt_mm.list);
|
2020-02-03 22:07:01 +07:00
|
|
|
|
|
|
|
mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
|
2018-01-30 18:19:40 +07:00
|
|
|
list_del(&mm->ppgtt_mm.lru_list);
|
2020-02-03 22:07:01 +07:00
|
|
|
mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
invalidate_ppgtt_mm(mm);
|
|
|
|
} else {
|
|
|
|
vfree(mm->ggtt_mm.virtual_ggtt);
|
|
|
|
}
|
|
|
|
|
|
|
|
vgpu_free_mm(mm);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
|
|
|
|
* @mm: a vGPU mm object
|
|
|
|
*
|
|
|
|
* This function is called when a user no longer needs to use a vGPU mm object
|
|
|
|
*/
|
|
|
|
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
|
|
|
|
{
|
2019-03-27 11:55:45 +07:00
|
|
|
atomic_dec_if_positive(&mm->pincount);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
|
2018-07-31 10:02:12 +07:00
|
|
|
* @mm: target vgpu mm
|
2016-03-28 22:23:16 +07:00
|
|
|
*
|
|
|
|
* This function is called when user wants to use a vGPU mm object. If this
|
|
|
|
* mm object hasn't been shadowed yet, the shadow will be populated at this
|
|
|
|
* time.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* Zero on success, negative error code if failed.
|
|
|
|
*/
|
|
|
|
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
atomic_inc(&mm->pincount);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
if (mm->type == INTEL_GVT_MM_PPGTT) {
|
|
|
|
ret = shadow_ppgtt_mm(mm);
|
2016-03-28 22:23:16 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2018-01-30 18:19:40 +07:00
|
|
|
|
2019-03-01 14:04:13 +07:00
|
|
|
mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
|
2018-01-30 18:19:40 +07:00
|
|
|
list_move_tail(&mm->ppgtt_mm.lru_list,
|
|
|
|
&mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
|
2019-03-01 14:04:13 +07:00
|
|
|
mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
|
2016-03-28 22:23:16 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
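/*
 * Illustrative pairing (hypothetical caller, shown only as a sketch of the
 * intended pin/unpin discipline, not code taken from an actual user):
 *
 *	if (intel_vgpu_pin_mm(mm))
 *		return -EFAULT;
 *	... translate GMAs / submit the workload using this mm ...
 *	intel_vgpu_unpin_mm(mm);
 */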
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
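/*
 * Reclaim shadow page tables from the least recently used, unpinned PPGTT
 * mm; returns 1 if one was invalidated, 0 if nothing could be reclaimed.
 */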
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
|
|
|
struct intel_vgpu_mm *mm;
|
|
|
|
struct list_head *pos, *n;
|
|
|
|
|
2019-03-01 14:04:13 +07:00
|
|
|
mutex_lock(&gvt->gtt.ppgtt_mm_lock);
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
|
|
|
|
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
if (atomic_read(&mm->pincount))
|
|
|
|
continue;
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
list_del_init(&mm->ppgtt_mm.lru_list);
|
2019-03-01 14:04:13 +07:00
|
|
|
mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
|
2018-01-30 18:19:40 +07:00
|
|
|
invalidate_ppgtt_mm(mm);
|
2016-03-28 22:23:16 +07:00
|
|
|
return 1;
|
|
|
|
}
|
2019-03-01 14:04:13 +07:00
|
|
|
mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
|
2016-03-28 22:23:16 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* GMA translation APIs.
|
|
|
|
*/
|
|
|
|
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
|
|
|
|
struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
|
|
|
|
{
|
|
|
|
struct intel_vgpu *vgpu = mm->vgpu;
|
|
|
|
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
|
|
|
struct intel_vgpu_ppgtt_spt *s;
|
|
|
|
|
2018-01-30 18:19:49 +07:00
|
|
|
s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
|
2016-03-28 22:23:16 +07:00
|
|
|
if (!s)
|
|
|
|
return -ENXIO;
|
|
|
|
|
|
|
|
if (!guest)
|
|
|
|
ppgtt_get_shadow_entry(s, e, index);
|
|
|
|
else
|
|
|
|
ppgtt_get_guest_entry(s, e, index);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* intel_vgpu_gma_to_gpa - translate a gma to GPA
|
|
|
|
* @mm: mm object. could be a PPGTT or GGTT mm object
|
|
|
|
* @gma: graphics memory address in this mm object
|
|
|
|
*
|
|
|
|
* This function is used to translate a graphics memory address in specific
|
|
|
|
* graphics memory space to guest physical address.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
|
|
|
|
*/
|
|
|
|
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
|
|
|
|
{
|
|
|
|
struct intel_vgpu *vgpu = mm->vgpu;
|
|
|
|
struct intel_gvt *gvt = vgpu->gvt;
|
|
|
|
struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
|
|
|
|
struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
|
|
|
|
unsigned long gpa = INTEL_GVT_INVALID_ADDR;
|
|
|
|
unsigned long gma_index[4];
|
|
|
|
struct intel_gvt_gtt_entry e;
|
2018-01-30 18:19:40 +07:00
|
|
|
int i, levels = 0;
|
2016-03-28 22:23:16 +07:00
|
|
|
int ret;
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
|
|
|
|
mm->type != INTEL_GVT_MM_PPGTT);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
if (mm->type == INTEL_GVT_MM_GGTT) {
|
|
|
|
if (!vgpu_gmadr_is_valid(vgpu, gma))
|
|
|
|
goto err;
|
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
ggtt_get_guest_entry(mm, &e,
|
|
|
|
gma_ops->gma_to_ggtt_pte_index(gma));
|
|
|
|
|
2017-10-10 12:51:32 +07:00
|
|
|
gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
|
|
|
|
+ (gma & ~I915_GTT_PAGE_MASK);
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
|
2018-01-30 18:19:40 +07:00
|
|
|
} else {
|
|
|
|
switch (mm->ppgtt_mm.root_entry_type) {
|
|
|
|
case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
|
|
|
|
ppgtt_get_shadow_root_entry(mm, &e, 0);
|
|
|
|
|
|
|
|
gma_index[0] = gma_ops->gma_to_pml4_index(gma);
|
|
|
|
gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
|
|
|
|
gma_index[2] = gma_ops->gma_to_pde_index(gma);
|
|
|
|
gma_index[3] = gma_ops->gma_to_pte_index(gma);
|
|
|
|
levels = 4;
|
|
|
|
break;
|
|
|
|
case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
|
|
|
|
ppgtt_get_shadow_root_entry(mm, &e,
|
|
|
|
gma_ops->gma_to_l3_pdp_index(gma));
|
|
|
|
|
|
|
|
gma_index[0] = gma_ops->gma_to_pde_index(gma);
|
|
|
|
gma_index[1] = gma_ops->gma_to_pte_index(gma);
|
|
|
|
levels = 2;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
GEM_BUG_ON(1);
|
|
|
|
}
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
/* walk the shadow page table and get gpa from guest entry */
|
|
|
|
for (i = 0; i < levels; i++) {
|
|
|
|
ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
|
|
|
|
(i == levels - 1));
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
2017-08-02 14:06:37 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
if (!pte_ops->test_present(&e)) {
|
|
|
|
gvt_dbg_core("GMA 0x%lx is not present\n", gma);
|
|
|
|
goto err;
|
|
|
|
}
|
2017-08-02 14:06:37 +07:00
|
|
|
}
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2018-01-30 18:19:40 +07:00
|
|
|
gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
|
|
|
|
(gma & ~I915_GTT_PAGE_MASK);
|
|
|
|
trace_gma_translate(vgpu->id, "ppgtt", 0,
|
|
|
|
mm->ppgtt_mm.root_entry_type, gma, gpa);
|
|
|
|
}
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
return gpa;
|
|
|
|
err:
|
2017-03-10 16:26:53 +07:00
|
|
|
gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
|
2016-03-28 22:23:16 +07:00
|
|
|
return INTEL_GVT_INVALID_ADDR;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:45 +07:00
|
|
|
static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
|
2016-03-28 22:23:16 +07:00
|
|
|
unsigned int off, void *p_data, unsigned int bytes)
|
|
|
|
{
|
|
|
|
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
|
|
|
|
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
|
|
|
|
unsigned long index = off >> info->gtt_entry_size_shift;
|
2019-05-27 12:45:51 +07:00
|
|
|
unsigned long gma;
|
2016-03-28 22:23:16 +07:00
|
|
|
struct intel_gvt_gtt_entry e;
|
|
|
|
|
|
|
|
if (bytes != 4 && bytes != 8)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2019-05-27 12:45:51 +07:00
|
|
|
gma = index << I915_GTT_PAGE_SHIFT;
|
|
|
|
if (!intel_gvt_ggtt_validate_range(vgpu,
|
|
|
|
gma, 1 << I915_GTT_PAGE_SHIFT)) {
|
|
|
|
gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
|
|
|
|
memset(p_data, 0, bytes);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-03-28 22:23:16 +07:00
|
|
|
ggtt_get_guest_entry(ggtt_mm, &e, index);
|
|
|
|
memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
|
|
|
|
bytes);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
|
|
|
|
* @vgpu: a vGPU
|
|
|
|
* @off: register offset
|
|
|
|
* @p_data: data will be returned to guest
|
|
|
|
* @bytes: data length
|
|
|
|
*
|
|
|
|
* This function is used to emulate the GTT MMIO register read
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* Zero on success, error code if failed.
|
|
|
|
*/
|
2018-01-30 18:19:45 +07:00
|
|
|
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
|
2016-03-28 22:23:16 +07:00
|
|
|
void *p_data, unsigned int bytes)
|
|
|
|
{
|
|
|
|
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (bytes != 4 && bytes != 8)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
off -= info->gtt_start_offset;
|
2018-01-30 18:19:45 +07:00
|
|
|
ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
|
2016-03-28 22:23:16 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-03-27 14:35:14 +07:00
|
|
|
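/*
 * Release the DMA mapping behind a GGTT entry unless it points at the
 * scratch page.
 */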
static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
|
|
|
|
struct intel_gvt_gtt_entry *entry)
|
|
|
|
{
|
|
|
|
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
|
|
|
|
unsigned long pfn;
|
|
|
|
|
|
|
|
pfn = pte_ops->get_pfn(entry);
|
|
|
|
if (pfn != vgpu->gvt->gtt.scratch_mfn)
|
|
|
|
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
|
|
|
|
pfn << PAGE_SHIFT);
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:19:45 +07:00
|
|
|
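/*
 * Emulate a guest write to a GGTT entry: merge split 4-byte writes of an
 * 8-byte entry, map the new guest page and update both the virtual and the
 * host (shadow) GGTT entries.
 */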
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
|
2016-03-28 22:23:16 +07:00
|
|
|
void *p_data, unsigned int bytes)
|
|
|
|
{
|
|
|
|
struct intel_gvt *gvt = vgpu->gvt;
|
|
|
|
const struct intel_gvt_device_info *info = &gvt->device_info;
|
|
|
|
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
|
|
|
|
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
|
|
|
unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
|
2018-03-01 14:49:59 +07:00
|
|
|
unsigned long gma, gfn;
|
2019-05-23 05:18:36 +07:00
|
|
|
struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
|
|
|
|
struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
|
2018-03-01 14:49:59 +07:00
|
|
|
dma_addr_t dma_addr;
|
|
|
|
int ret;
|
2018-09-19 13:42:10 +07:00
|
|
|
struct intel_gvt_partial_pte *partial_pte, *pos, *n;
|
|
|
|
bool partial_update = false;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
if (bytes != 4 && bytes != 8)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-10-10 12:51:32 +07:00
|
|
|
gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
/* the VM may configure the whole GM space when ballooning is used */
|
2017-02-21 14:54:56 +07:00
|
|
|
if (!vgpu_gmadr_is_valid(vgpu, gma))
|
2016-03-28 22:23:16 +07:00
|
|
|
return 0;
|
|
|
|
|
2018-09-19 13:42:10 +07:00
|
|
|
e.type = GTT_TYPE_GGTT_PTE;
|
2016-03-28 22:23:16 +07:00
|
|
|
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
|
|
|
|
bytes);
|
|
|
|
|
2018-06-19 14:44:11 +07:00
|
|
|
/* If the ggtt entry size is 8 bytes and it's split into two 4-byte
|
2018-09-19 13:42:10 +07:00
|
|
|
* writes, save the first 4 bytes in a list and update the virtual
|
|
|
|
* PTE. Only update the shadow PTE when the second 4-byte write comes.
|
2018-06-19 14:44:11 +07:00
|
|
|
*/
|
|
|
|
if (bytes < info->gtt_entry_size) {
|
2018-09-19 13:42:10 +07:00
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(pos, n,
|
|
|
|
&ggtt_mm->ggtt_mm.partial_pte_list, list) {
|
|
|
|
if (g_gtt_index == pos->offset >>
|
|
|
|
info->gtt_entry_size_shift) {
|
|
|
|
if (off != pos->offset) {
|
|
|
|
/* the second partial part*/
|
|
|
|
int last_off = pos->offset &
|
|
|
|
(info->gtt_entry_size - 1);
|
|
|
|
|
|
|
|
memcpy((void *)&e.val64 + last_off,
|
|
|
|
(void *)&pos->data + last_off,
|
|
|
|
bytes);
|
|
|
|
|
|
|
|
list_del(&pos->list);
|
|
|
|
kfree(pos);
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* update of the first partial part */
|
|
|
|
pos->data = e.val64;
|
|
|
|
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
2018-06-19 14:44:11 +07:00
|
|
|
|
2018-09-19 13:42:10 +07:00
|
|
|
if (!found) {
|
|
|
|
/* the first partial part */
|
|
|
|
partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
|
|
|
|
if (!partial_pte)
|
|
|
|
return -ENOMEM;
|
|
|
|
partial_pte->offset = off;
|
|
|
|
partial_pte->data = e.val64;
|
|
|
|
list_add_tail(&partial_pte->list,
|
|
|
|
&ggtt_mm->ggtt_mm.partial_pte_list);
|
|
|
|
partial_update = true;
|
2018-06-19 14:44:11 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-19 13:42:10 +07:00
|
|
|
if (!partial_update && (ops->test_present(&e))) {
|
2017-12-22 17:06:31 +07:00
|
|
|
gfn = ops->get_pfn(&e);
|
2019-05-23 05:18:36 +07:00
|
|
|
m.val64 = e.val64;
|
|
|
|
m.type = e.type;
|
2017-12-22 17:06:31 +07:00
|
|
|
|
|
|
|
/* one PTE update may be issued in multiple writes and the
|
|
|
|
* first write may not construct a valid gfn
|
|
|
|
*/
|
|
|
|
if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
|
|
|
|
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2018-03-01 14:49:59 +07:00
|
|
|
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
|
2018-05-15 09:35:42 +07:00
|
|
|
PAGE_SIZE, &dma_addr);
|
2018-03-01 14:49:59 +07:00
|
|
|
if (ret) {
|
2018-01-30 18:19:48 +07:00
|
|
|
gvt_vgpu_err("fail to populate guest ggtt entry\n");
|
2017-03-21 09:54:21 +07:00
|
|
|
/* guest driver may read/write the entry when it is only partially
|
|
|
|
* updated; in this situation the p2m mapping will fail, so we fall
|
|
|
|
* back to setting the shadow entry to point to a scratch page
|
|
|
|
*/
|
2017-10-10 13:34:11 +07:00
|
|
|
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
|
2018-01-30 18:19:48 +07:00
|
|
|
} else
|
2018-03-01 14:49:59 +07:00
|
|
|
ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
|
2018-03-27 14:35:14 +07:00
|
|
|
} else {
|
2017-10-10 13:34:11 +07:00
|
|
|
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
|
2018-03-27 14:35:14 +07:00
|
|
|
ops->clear_present(&m);
|
|
|
|
}
|
2016-03-28 22:23:16 +07:00
|
|
|
|
2017-12-22 17:06:31 +07:00
|
|
|
out:
|
2018-09-19 13:42:09 +07:00
|
|
|
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
|
|
|
|
|
|
|
|
ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
|
|
|
|
ggtt_invalidate_pte(vgpu, &e);
|
|
|
|
|
2018-01-30 18:19:42 +07:00
|
|
|
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
|
2020-03-06 09:08:10 +07:00
|
|
|
ggtt_invalidate(gvt->gt);
|
2016-03-28 22:23:16 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2018-01-30 18:19:45 +07:00
|
|
|
* intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
|
2016-03-28 22:23:16 +07:00
|
|
|
* @vgpu: a vGPU
|
|
|
|
* @off: register offset
|
|
|
|
* @p_data: data from guest write
|
|
|
|
* @bytes: data length
|
|
|
|
*
|
|
|
|
* This function is used to emulate the GTT MMIO register write
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* Zero on success, error code if failed.
|
|
|
|
*/
|
2018-01-30 18:19:45 +07:00
|
|
|
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
|
|
|
|
unsigned int off, void *p_data, unsigned int bytes)
|
2016-03-28 22:23:16 +07:00
|
|
|
{
|
|
|
|
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
|
|
|
|
int ret;
|
2020-04-17 16:13:34 +07:00
|
|
|
struct intel_vgpu_submission *s = &vgpu->submission;
|
|
|
|
struct intel_engine_cs *engine;
|
|
|
|
int i;
|
2016-03-28 22:23:16 +07:00
|
|
|
|
|
|
|
if (bytes != 4 && bytes != 8)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
off -= info->gtt_start_offset;
|
2018-01-30 18:19:45 +07:00
|
|
|
ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
|
2020-04-17 16:13:34 +07:00
|
|
|
|
|
|
|
/* if ggtt of last submitted context is written,
|
|
|
|
* that context probably got unpinned.
|
|
|
|
* Set last shadowed ctx to invalid.
|
|
|
|
*/
|
|
|
|
for_each_engine(engine, vgpu->gvt->gt, i) {
|
|
|
|
if (!s->last_ctx[i].valid)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
|
|
|
|
s->last_ctx[i].valid = false;
|
|
|
|
}
|
2016-03-28 22:23:16 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-11-04 12:47:35 +07:00
|
|
|
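/*
 * Allocate and DMA-map the scratch page for one page-table level; non-leaf
 * levels are pre-filled with entries pointing at the next lower level's
 * scratch page.
 */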
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type type)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr;

	if (drm_WARN_ON(&i915->drm,
			type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by filling the scratch pt with entries that point
	 * to the next level scratch pt or scratch page. scratch_pt[type]
	 * is the scratch pt/scratch page used by a page table of level
	 * 'type': e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
	 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt is
	 * itself of GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch
	 * page mfn.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters like present/writeable/cache type
		 * are set to the same values as i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}

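/*
 * Descriptive note: the per-vGPU scratch chain built above links one page per
 * level: scratch_pt[GTT_TYPE_PPGTT_PML4_PT] -> scratch_pt[GTT_TYPE_PPGTT_PDP_PT]
 * -> scratch_pt[GTT_TYPE_PPGTT_PDE_PT] -> scratch_pt[GTT_TYPE_PPGTT_PTE_PT],
 * the last of which is left zeroed and serves as the scratch data page. Shadow
 * entries covering unbacked guest ranges can therefore resolve, level by level,
 * to a harmless zeroed page instead of pointing at arbitrary host memory.
 */
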
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}

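/*
 * Descriptive note: create_scratch_page_tree() and release_scratch_page_tree()
 * are paired. The release path only touches levels whose page pointer is
 * non-NULL, so it is also safe on the error path above, when only some of the
 * levels were successfully allocated and DMA-mapped.
 */
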
/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;

	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);

	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
	if (IS_ERR(gtt->ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(gtt->ggtt_mm);
	}

	intel_vgpu_reset_ggtt(vgpu, false);

	INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);

	return create_scratch_page_tree(vgpu);
}

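/*
 * Illustrative note, not part of the driver: intel_vgpu_init_gtt() and
 * intel_vgpu_clean_gtt() below are expected to be paired across the vGPU
 * lifetime, roughly as in this sketch (the surrounding labels are
 * placeholders):
 *
 *	ret = intel_vgpu_init_gtt(vgpu);	// at vGPU creation
 *	if (ret)
 *		goto err_clean_vgpu;
 *	...
 *	intel_vgpu_clean_gtt(vgpu);		// at vGPU destruction
 */
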
static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		intel_vgpu_destroy_mm(mm);
	}

	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
		gvt_err("vgpu ppgtt mm is not fully destroyed\n");

	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
		gvt_err("vgpu still has unfreed spt, freeing it now\n");
		ppgtt_free_all_spt(vgpu);
	}
}

static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_gvt_partial_pte *pos, *next;

	list_for_each_entry_safe(pos, next,
				 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
				 list) {
		gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
			   pos->offset, pos->data);
		kfree(pos);
	}
	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
	vgpu->gtt.ggtt_mm = NULL;
}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_destroy_ggtt_mm(vgpu);
	release_scratch_page_tree(vgpu);
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
	     "someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		free_page((unsigned long)oos_page->mem);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}
		oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
		if (!oos_page->mem) {
			ret = -ENOMEM;
			kfree(oos_page);
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}

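/*
 * Descriptive note: each preallocated out-of-sync (oos) page carries one
 * zero-order backing page (oos_page->mem) used to buffer a page's worth of
 * guest page-table content, an id, and two list links: 'list' for the
 * device-wide free/use lists initialized above, and 'vm_list' used while the
 * oos page is attached to a shadow page table elsewhere in the shadowing
 * code. The pool exists only when out-of-sync shadowing is enabled and is
 * torn down again in clean_spt_oos().
 */
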
/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: pdp root array
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[])
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos;

	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);

		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
				return mm;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
				    sizeof(mm->ppgtt_mm.guest_pdps)))
				return mm;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
	return NULL;
}

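/*
 * Descriptive note: a 4-level guest PPGTT is identified by its single PML4
 * root, so the L4 case above only compares pdps[0]. A 3-level guest PPGTT is
 * identified by all of its PDP roots together, hence the full memcmp() of the
 * guest_pdps array in the L3 case.
 */
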
/**
 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to find or create a PPGTT mm object from a guest.
 *
 * Returns:
 * pointer to mm object on success, ERR_PTR() encoding an error code if failed.
 */
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm) {
		intel_vgpu_mm_get(mm);
	} else {
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
		if (IS_ERR(mm))
			gvt_vgpu_err("fail to create mm\n");
	}
	return mm;
}

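/*
 * Illustrative usage sketch, not part of the driver: a caller that has read
 * the guest's pdp roots (for a 4-level PPGTT here) would typically pair get
 * and put as below; variable names are placeholders.
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *	... use the shadowed address space ...
 *	intel_vgpu_put_ppgtt_mm(vgpu, pdps);
 */
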
/**
 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find a PPGTT mm object from a guest and release a
 * reference on it; the object is destroyed once the last reference is gone.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_vgpu_mm_put(mm);
	return 0;
}

/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
	gvt->gtt.gma_ops = &gen8_gtt_gma_ops;

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
	mutex_init(&gvt->gtt.ppgtt_mm_lock);
	return 0;
}

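/*
 * Descriptive note: gvt->gtt.scratch_page / scratch_mfn set up above form the
 * device-level scratch target. intel_vgpu_reset_ggtt() below points every GGTT
 * entry of a vGPU at this mfn, so an idle or freshly created vGPU never maps
 * arbitrary host memory through its GGTT range.
 */
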
/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->gt->i915->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}

/**
 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
 * @vgpu: a vGPU
 *
 * This function is called to invalidate all PPGTT instances of a vGPU.
 *
 */
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		if (mm->type == INTEL_GVT_MM_PPGTT) {
			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
			list_del_init(&mm->ppgtt_mm.lru_list);
			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
			if (mm->ppgtt_mm.shadowed)
				invalidate_ppgtt_mm(mm);
		}
	}
}

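/*
 * Descriptive note: ppgtt_mm_lock only guards the device-wide LRU list, so it
 * is taken just around the list_del_init() above; tearing down the shadow
 * tables in invalidate_ppgtt_mm() happens outside that lock.
 */
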
/**
 * intel_vgpu_reset_ggtt - reset the GGTT entry
 * @vgpu: a vGPU
 * @invalidate_old: invalidate old entries
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry old_entry;
	u32 index;
	u32 num_entries;

	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
	pte_ops->set_present(&entry);

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	ggtt_invalidate(gvt->gt);
}

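/*
 * Descriptive note: @invalidate_old selects whether existing host entries are
 * torn down first. intel_vgpu_init_gtt() passes false, since a freshly created
 * vGPU has no live shadow GGTT entries to release, while intel_vgpu_reset_gtt()
 * below passes true so stale mappings are invalidated before every entry is
 * redirected to the device scratch page.
 */
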
/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT, scratch page.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
}