drm/i915/gt: Fill all the unused space in the GGTT
When we allocate space in the GGTT we may have to allocate a larger
region than will be populated by the object to accommodate fencing. Make
sure that this space beyond the end of the buffer points safely into
scratch space, in case the HW tries to access it anyway (e.g. fenced
access to the last tile row).
v2: Preemptively / conservatively guard gen6 ggtt as well.
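
As an illustration only (not driver code; the sizes and the fence_size() helper below are hypothetical), here is a minimal standalone C sketch of why a fenced GGTT node can be larger than the object backing it, leaving trailing PTEs that must point at scratch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* A fenced allocation is rounded up to a whole number of tile rows. */
static uint64_t fence_size(uint64_t obj_size, uint64_t tile_row)
{
	return (obj_size + tile_row - 1) / tile_row * tile_row;
}

int main(void)
{
	uint64_t obj_size  = 9u * PAGE_SIZE;  /* object backing store */
	uint64_t tile_row  = 4u * PAGE_SIZE;  /* hypothetical tile-row size */
	uint64_t node_size = fence_size(obj_size, tile_row);

	uint64_t obj_ptes   = obj_size / PAGE_SIZE;   /* 9 */
	uint64_t total_ptes = node_size / PAGE_SIZE;  /* 12 */

	assert(total_ptes >= obj_ptes);
	/*
	 * The 3 PTEs past the object are allocated but unused: they must
	 * map scratch, or a fenced access to the last tile row reads junk.
	 */
	printf("object PTEs: %llu, scratch-filled PTEs: %llu\n",
	       (unsigned long long)obj_ptes,
	       (unsigned long long)(total_ptes - obj_ptes));
	return 0;
}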
Reported-by: Imre Deak <imre.deak@intel.com>
References: https://gitlab.freedesktop.org/drm/intel/-/issues/1554
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: stable@vger.kernel.org
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200331152348.26946-1-chris@chris-wilson.co.uk
(cherry picked from commit 4d6c185908)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
commit 0b72a251bf (parent 8262b49209)
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -191,10 +191,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     enum i915_cache_level level,
 				     u32 flags)
 {
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	struct sgt_iter sgt_iter;
-	gen8_pte_t __iomem *gtt_entries;
 	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	gen8_pte_t __iomem *gte;
+	gen8_pte_t __iomem *end;
+	struct sgt_iter iter;
 	dma_addr_t addr;
 
 	/*
@@ -202,10 +203,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	 * not to allow the user to override access to a read only page.
 	 */
 
-	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
-	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
-	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
-		gen8_set_pte(gtt_entries++, pte_encode | addr);
+	gte = (gen8_pte_t __iomem *)ggtt->gsm;
+	gte += vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
+	for_each_sgt_daddr(addr, iter, vma->pages)
+		gen8_set_pte(gte++, pte_encode | addr);
+	GEM_BUG_ON(gte > end);
+
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (gte < end)
+		gen8_set_pte(gte++, vm->scratch[0].encode);
 
 	/*
 	 * We want to flush the TLBs only after we're certain all the PTE
@@ -241,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 				     u32 flags)
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
-	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+	gen6_pte_t __iomem *gte;
+	gen6_pte_t __iomem *end;
 	struct sgt_iter iter;
 	dma_addr_t addr;
 
+	gte = (gen6_pte_t __iomem *)ggtt->gsm;
+	gte += vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
 	for_each_sgt_daddr(addr, iter, vma->pages)
-		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+		iowrite32(vm->pte_encode(addr, level, flags), gte++);
+	GEM_BUG_ON(gte > end);
+
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (gte < end)
+		iowrite32(vm->scratch[0].encode, gte++);
 
 	/*
 	 * We want to flush the TLBs only after we're certain all the PTE
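
Both hunks apply the same pattern. Reduced to a hedged, self-contained sketch (pte_t, insert_entries(), and the plain pointer writes are stand-ins for gen8_pte_t/gen6_pte_t, the real insert paths, and gen8_set_pte()/iowrite32()):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t pte_t;	/* stand-in for gen8_pte_t / gen6_pte_t */

/*
 * Write one PTE per backing page, then backfill every remaining slot in
 * the allocated range [gte, end) with the scratch PTE so any stray HW
 * access beyond the object lands in scratch.
 */
static void insert_entries(pte_t *gte, pte_t *end,
			   const pte_t *page_ptes, size_t npages,
			   pte_t scratch_pte)
{
	for (size_t i = 0; i < npages; i++)
		*gte++ = page_ptes[i];	/* gen8_set_pte()/iowrite32() in the driver */
	assert(gte <= end);		/* mirrors GEM_BUG_ON(gte > end) */

	/* Fill the allocated but "unused" space beyond the end of the buffer. */
	while (gte < end)
		*gte++ = scratch_pte;
}

int main(void)
{
	pte_t slots[4] = { 0 };
	pte_t pages[2] = { 0x1000, 0x2000 };

	/* The node holds 4 PTEs but the object only backs 2: the tail becomes scratch. */
	insert_entries(slots, slots + 4, pages, 2, /*scratch_pte=*/0xdead);
	assert(slots[2] == 0xdead && slots[3] == 0xdead);
	return 0;
}

The ordering matters for the same reason it does in the patch: the scratch backfill happens before the single TLB flush that follows, so the hardware never observes the stale PTEs in the tail of the allocation.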