drm/i915: accurate page size tracking for the ppgtt

Now that we support multiple page sizes for the ppgtt, it would be
useful to track the real usage for debugging purposes.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006145041.21673-16-matthew.auld@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006221833.32439-15-chris@chris-wilson.co.uk
This commit is contained in:
Matthew Auld 2017-10-06 23:18:27 +01:00 committed by Chris Wilson
parent 17a00cf73c
commit d9ec12f8e3
2 changed files with 21 additions and 0 deletions

View File

@@ -1053,6 +1053,8 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
 				      cache_level);
+
+	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 }
 
 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
@@ -1145,7 +1147,10 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 			vaddr = kmap_atomic_px(pd);
 			vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
 			kunmap_atomic(vaddr);
+			page_size = I915_GTT_PAGE_SIZE_64K;
 		}
+
+		vma->page_sizes.gtt |= page_size;
 	} while (iter->sg);
 }
@@ -1170,6 +1175,8 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
 		while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
 						     &iter, &idx, cache_level))
 			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+
+		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 	}
 }
@@ -1891,6 +1898,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 		}
 	} while (1);
 	kunmap_atomic(vaddr);
+
+	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
@@ -2598,6 +2607,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
 	intel_runtime_pm_put(i915);
 
+	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
 	/*
 	 * Without aliasing PPGTT there's no difference between
 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally

View File

@@ -169,6 +169,7 @@ struct drm_i915_gem_object {
 	struct sg_table *pages;
 	void *mapping;
 
+	/* TODO: whack some of this into the error state */
 	struct i915_page_sizes {
 		/**
 		 * The sg mask of the pages sg_table. i.e the mask of
@@ -184,6 +185,15 @@ struct drm_i915_gem_object {
 		 * to use opportunistically.
 		 */
 		unsigned int sg;
+
+		/**
+		 * The actual gtt page size usage. Since we can have
+		 * multiple vma associated with this object we need to
+		 * prevent any trampling of state, hence a copy of this
+		 * struct also lives in each vma, therefore the gtt
+		 * value here should only be read/write through the vma.
+		 */
+		unsigned int gtt;
 	} page_sizes;
 
 	struct i915_gem_object_page_iter {