drm/i915: Remove chipset flush after cache flush

We always flush the chipset prior to executing with the GPU, so we can
skip the flush during ordinary domain management.

This should help mitigate the potential (though likely trivial)
performance regression from doing the flush unconditionally before
execbuf, introduced in commit dcd79934b0 ("drm/i915: Unconditionally
flush any chipset buffers before execbuf").

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20161106130001.9509-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 24327f837f
commit d0da48cf92
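The API change is easiest to see caller-side: i915_gem_clflush_object()
previously returned a bool telling domain management whether a chipset
flush still had to follow, and now returns void because the execbuf path
flushes the chipset unconditionally anyway. Below is a minimal,
self-contained toy model of that contract change; the stub types and
printf stand-ins are illustrative only, not the driver's real structures
or call sites.

	/* Toy model, not kernel code: stub flushes that only print, to
	 * show the old and new contracts side by side.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_obj {
		bool has_pages;   /* stands in for obj->mm.pages */
		bool coherent;    /* stands in for cpu_cache_is_coherent() */
		bool cache_dirty;
	};

	static void clflush_pages(struct toy_obj *obj)
	{
		(void)obj;
		printf("clflush object pages\n");
	}

	static void chipset_flush(void)
	{
		printf("flush chipset buffers\n");
	}

	/* Old contract: report whether any cache lines were actually
	 * flushed, so the caller knows a chipset flush must follow.
	 */
	static bool clflush_object_old(struct toy_obj *obj, bool force)
	{
		if (!obj->has_pages)
			return false;
		if (!force && obj->coherent) {
			obj->cache_dirty = true;
			return false;
		}
		clflush_pages(obj);
		obj->cache_dirty = false;
		return true;
	}

	/* New contract: void. The chipset flush is the execbuf path's
	 * job, so domain management no longer pairs the two flushes.
	 */
	static void clflush_object_new(struct toy_obj *obj, bool force)
	{
		if (!obj->has_pages)
			return;
		if (!force && obj->coherent) {
			obj->cache_dirty = true;
			return;
		}
		clflush_pages(obj);
		obj->cache_dirty = false;
	}

	int main(void)
	{
		struct toy_obj obj = { .has_pages = true };

		/* Before this patch: caller pairs the two flushes. */
		if (clflush_object_old(&obj, false))
			chipset_flush();

		/* After this patch: only the cache flush remains here. */
		clflush_object_new(&obj, false);
		return 0;
	}

After the patch the driver relies on the invariant from dcd79934b0 that
every execbuf begins with a chipset flush, so a cache flush done during
domain management becomes globally visible no later than the next
submission.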
drivers/gpu/drm/i915/i915_drv.h
@@ -3403,7 +3403,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
 void i915_gem_reset(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
drivers/gpu/drm/i915/i915_gem.c
@@ -3196,23 +3196,22 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	return ret;
 }
 
-bool
-i915_gem_clflush_object(struct drm_i915_gem_object *obj,
-			bool force)
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+			     bool force)
 {
 	/* If we don't have a page list set up, then we're not pinned
 	 * to GPU, and we can ignore the cache flush because it'll happen
 	 * again at bind time.
 	 */
 	if (!obj->mm.pages)
-		return false;
+		return;
 
 	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
 	if (obj->stolen || obj->phys_handle)
-		return false;
+		return;
 
 	/* If the GPU is snooping the contents of the CPU cache,
 	 * we do not need to manually clear the CPU cache lines. However,
@@ -3224,14 +3223,12 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	 */
 	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
 		obj->cache_dirty = true;
-		return false;
+		return;
 	}
 
 	trace_i915_gem_object_clflush(obj);
 	drm_clflush_sg(obj->mm.pages);
 	obj->cache_dirty = false;
-
-	return true;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -3277,9 +3274,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
 		return;
 
-	if (i915_gem_clflush_object(obj, obj->pin_display))
-		i915_gem_chipset_flush(to_i915(obj->base.dev));
-
+	i915_gem_clflush_object(obj, obj->pin_display);
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
 	obj->base.write_domain = 0;
@@ -3486,10 +3481,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * object is now coherent at its new cache level (with respect
 	 * to the access domain).
 	 */
-	if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
-		if (i915_gem_clflush_object(obj, true))
-			i915_gem_chipset_flush(to_i915(obj->base.dev));
-	}
+	if (obj->cache_dirty && cpu_write_needs_clflush(obj))
+		i915_gem_clflush_object(obj, true);
 
 	return 0;
 }