mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)
drm/i915: cleanup cache-coloring
Try to tidy up the cache-coloring such that we rid the code of any
mm.color_adjust assumptions. This should hopefully make it more obvious
in the code when we need to actually use the cache-level as the color,
and as a bonus should make adding a different color-scheme simpler.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190909124052.22900-3-matthew.auld@intel.com
parent e9ceb751ad
commit 33dd889923
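The cleanup follows a single pattern: wherever the driver used the mere
presence of the vm->mm.color_adjust callback as shorthand for "this address
space colors drm_mm nodes by cache level", it now asks an explicit helper,
i915_vm_has_cache_coloring(), introduced in the i915_gem_gtt.h hunk below.
A minimal before/after sketch of that pattern, condensed from the hunks that
follow rather than quoted verbatim from any single one of them:

	/* Before: a non-NULL color_adjust callback implied cache coloring. */
	if (vma->vm->mm.color_adjust)
		vma->node.color = cache_level;

	/* After: the GGTT-only assumption is named in one place. */
	if (i915_vm_has_cache_coloring(vma->vm))
		vma->node.color = cache_level;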
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -294,8 +294,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		}
 	}
 
-	list_for_each_entry(vma, &obj->vma.list, obj_link)
-		vma->node.color = cache_level;
+	list_for_each_entry(vma, &obj->vma.list, obj_link) {
+		if (i915_vm_has_cache_coloring(vma->vm))
+			vma->node.color = cache_level;
+	}
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 	obj->cache_dirty = true; /* Always invalidate stale cachelines */
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
@@ -2364,7 +2364,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 					  u64 min_size, u64 alignment,
-					  unsigned cache_level,
+					  unsigned long color,
 					  u64 start, u64 end,
 					  unsigned flags);
 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
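Worth noting: drm_mm stores node colors as an unsigned long (drm_mm_node.color),
so widening the parameter from unsigned cache_level to unsigned long color lets
the eviction API carry any color scheme at its native width instead of baking
in the assumption that the color is a cache level.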
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -70,7 +70,7 @@ mark_free(struct drm_mm_scan *scan,
  * @vm: address space to evict from
  * @min_size: size of the desired free space
  * @alignment: alignment constraint of the desired free space
- * @cache_level: cache_level for the desired space
+ * @color: color for the desired space
  * @start: start (inclusive) of the range from which to evict objects
  * @end: end (exclusive) of the range from which to evict objects
  * @flags: additional flags to control the eviction algorithm
@@ -91,7 +91,7 @@ mark_free(struct drm_mm_scan *scan,
 int
 i915_gem_evict_something(struct i915_address_space *vm,
 			 u64 min_size, u64 alignment,
-			 unsigned cache_level,
+			 unsigned long color,
 			 u64 start, u64 end,
 			 unsigned flags)
 {
@@ -124,7 +124,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	if (flags & PIN_MAPPABLE)
 		mode = DRM_MM_INSERT_LOW;
 	drm_mm_scan_init_with_range(&scan, &vm->mm,
-				    min_size, alignment, cache_level,
+				    min_size, alignment, color,
 				    start, end, mode);
 
 	/*
@@ -266,7 +266,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 	u64 start = target->start;
 	u64 end = start + target->size;
 	struct i915_vma *vma, *next;
-	bool check_color;
 	int ret = 0;
 
 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
@@ -283,8 +282,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 	if (!(flags & PIN_NONBLOCK))
 		i915_retire_requests(vm->i915);
 
-	check_color = vm->mm.color_adjust;
-	if (check_color) {
+	if (i915_vm_has_cache_coloring(vm)) {
 		/* Expand search to cover neighbouring guard pages (or lack!) */
 		if (start)
 			start -= I915_GTT_PAGE_SIZE;
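To make the expansion concrete (hypothetical numbers; I915_GTT_PAGE_SIZE is
4 KiB): for a target node spanning [0x10000, 0x20000), the scan window widens
to [0xf000, 0x21000), so the nodes abutting either end of the target are
visited and can be evicted if their color conflicts with the would-be guard
pages.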
@@ -310,7 +308,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 		 * abutt and conflict. If they are in conflict, then we evict
 		 * those as well to make room for our guard pages.
 		 */
-		if (check_color) {
+		if (i915_vm_has_cache_coloring(vm)) {
 			if (node->start + node->size == target->start) {
 				if (node->color == target->color)
 					continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -376,6 +376,12 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
 	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
 }
 
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+	return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
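For context, the callback that makes this helper return true for the GGTT is
i915_ggtt_color_adjust (named in the selftest hunk below). Roughly, and
treating this as a from-memory sketch of the contemporary driver rather than a
verbatim quote, it keeps a one-page hole between nodes of differing color:

	static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
					   unsigned long color,
					   u64 *start, u64 *end)
	{
		/* Shrink the search window by a page next to any
		 * already-allocated neighbour whose color differs.
		 */
		if (i915_node_color_differs(node, color))
			*start += I915_GTT_PAGE_SIZE;

		/* Likewise keep a guard page before a following hole, so
		 * the GPU cannot prefetch past the end of the allocation.
		 */
		if (drm_mm_hole_follows(node))
			*end -= I915_GTT_PAGE_SIZE;
	}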
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
@@ -477,7 +477,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	vma->flags &= ~I915_VMA_CAN_FENCE;
 }
 
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
 {
 	struct drm_mm_node *node = &vma->node;
 	struct drm_mm_node *other;
@@ -489,7 +489,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
 	 * these constraints apply and set the drm_mm.color_adjust
 	 * appropriately.
 	 */
-	if (vma->vm->mm.color_adjust == NULL)
+	if (!i915_vm_has_cache_coloring(vma->vm))
 		return true;
 
 	/* Only valid to be called on an already inserted vma */
@@ -497,12 +497,12 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
 	GEM_BUG_ON(list_empty(&node->node_list));
 
 	other = list_prev_entry(node, node_list);
-	if (i915_node_color_differs(other, cache_level) &&
+	if (i915_node_color_differs(other, color) &&
 	    !drm_mm_hole_follows(other))
 		return false;
 
 	other = list_next_entry(node, node_list);
-	if (i915_node_color_differs(other, cache_level) &&
+	if (i915_node_color_differs(other, color) &&
 	    !drm_mm_hole_follows(node))
 		return false;
 
@@ -539,7 +539,7 @@ static int
 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct drm_i915_private *dev_priv = vma->vm->i915;
-	unsigned int cache_level;
+	unsigned long color;
 	u64 start, end;
 	int ret;
 
@@ -580,14 +580,14 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		return -ENOSPC;
 	}
 
+	color = 0;
 	if (vma->obj) {
 		ret = i915_gem_object_pin_pages(vma->obj);
 		if (ret)
 			return ret;
 
-		cache_level = vma->obj->cache_level;
-	} else {
-		cache_level = 0;
+		if (i915_vm_has_cache_coloring(vma->vm))
+			color = vma->obj->cache_level;
 	}
 
 	GEM_BUG_ON(vma->pages);
@@ -605,7 +605,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	}
 
 	ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
-				   size, offset, cache_level,
+				   size, offset, color,
 				   flags);
 	if (ret)
 		goto err_clear;
@@ -644,7 +644,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	}
 
 	ret = i915_gem_gtt_insert(vma->vm, &vma->node,
-				  size, alignment, cache_level,
+				  size, alignment, color,
 				  start, end, flags);
 	if (ret)
 		goto err_clear;
@@ -653,7 +653,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
+	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
 
 	mutex_lock(&vma->vm->mutex);
 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
@@ -295,7 +295,7 @@ i915_vma_compare(struct i915_vma *vma,
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags);
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
 bool i915_vma_misplaced(const struct i915_vma *vma,
 			u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -274,12 +274,14 @@ static int igt_evict_for_cache_color(void *arg)
 	LIST_HEAD(objects);
 	int err;
 
-	/* Currently the use of color_adjust is limited to cache domains within
-	 * the ggtt, and so the presence of mm.color_adjust is assumed to be
-	 * i915_ggtt_color_adjust throughout our driver, so using a mock color
-	 * adjust will work just fine for our purposes.
+	/*
+	 * Currently the use of color_adjust for the GGTT is limited to cache
+	 * coloring and guard pages, and so the presence of mm.color_adjust for
+	 * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
+	 * color adjust will work just fine for our purposes.
 	 */
 	ggtt->vm.mm.color_adjust = mock_color_adjust;
+	GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));
 
 	obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
 	if (IS_ERR(obj)) {
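The mock only needs to be non-NULL for i915_vm_has_cache_coloring() to report
true; the eviction paths under test compare node colors themselves. A sketch
of a sufficient mock (assumed from its role here, not quoted from the
selftest):

	static void mock_color_adjust(const struct drm_mm_node *node,
				      unsigned long color,
				      u64 *start, u64 *end)
	{
		/* Deliberately empty: only its presence matters to the
		 * coloring checks under test.
		 */
	}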