drm/i915: Split i915_gem_flush_ring() into separate invalidate/flush funcs
By moving the function to intel_ringbuffer and currying the appropriate
parameter, hopefully we make the callsites easier to read and understand.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit a7b9761d0a
parent 016fd0c1ae
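The shape of the change, lifted from the i915_add_request() hunk below:
callers stop passing domain masks and tracking ring->gpu_caches_dirty by
hand, and instead call the helper that names their intent.

	/* Before: the caller spells out the domains and clears the dirty flag itself. */
	if (ring->gpu_caches_dirty) {
		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
		ring->gpu_caches_dirty = false;
	}

	/* After: the currying and the flag bookkeeping live in intel_ringbuffer.c. */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;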
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1256,9 +1256,6 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
-				     uint32_t invalidate_domains,
-				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1549,13 +1549,9 @@ i915_add_request(struct intel_ring_buffer *ring,
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (ring->gpu_caches_dirty) {
-		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
-
-		ring->gpu_caches_dirty = false;
-	}
+	ret = intel_ring_flush_all_caches(ring);
+	if (ret)
+		return ret;
 
 	if (request == NULL) {
 		request = kmalloc(sizeof(*request), GFP_KERNEL);
@@ -2254,25 +2250,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
-		    uint32_t invalidate_domains,
-		    uint32_t flush_domains)
-{
-	int ret;
-
-	if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
-
-	ret = ring->flush(ring, invalidate_domains, flush_domains);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
 	if (list_empty(&ring->active_list))
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -707,14 +707,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	ret = i915_gem_flush_ring(ring,
-				  I915_GEM_GPU_DOMAINS,
-				  ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
-	if (ret)
-		return ret;
-
-	ring->gpu_caches_dirty = false;
-	return 0;
+	return intel_ring_invalidate_all_caches(ring);
 }
 
 static bool
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1564,3 +1564,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
 	return intel_init_ring_buffer(dev, ring);
 }
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -195,6 +195,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 void intel_ring_advance(struct intel_ring_buffer *ring);
 
 u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
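The two helpers the diff adds are deliberately asymmetric:
intel_ring_flush_all_caches() is a cheap no-op when no GPU writes are
outstanding, while intel_ring_invalidate_all_caches() always invalidates
(the next batch must read fresh data) and folds in a write-flush only when
ring->gpu_caches_dirty is set. Below is a minimal standalone sketch of that
state machine in plain userspace C; the ring struct, the GPU_DOMAINS
placeholder mask, and the fake_flush callback are simplified stand-ins
invented for illustration, not the kernel's types or API.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define GPU_DOMAINS 0x3eu /* placeholder for I915_GEM_GPU_DOMAINS */

	struct ring {
		bool gpu_caches_dirty;
		int (*flush)(struct ring *ring, uint32_t invalidate, uint32_t flush);
	};

	/* Mirrors intel_ring_flush_all_caches(): a no-op unless writes are pending. */
	static int flush_all_caches(struct ring *ring)
	{
		int ret;

		if (!ring->gpu_caches_dirty)
			return 0;

		ret = ring->flush(ring, 0, GPU_DOMAINS);
		if (ret)
			return ret;

		ring->gpu_caches_dirty = false;
		return 0;
	}

	/*
	 * Mirrors intel_ring_invalidate_all_caches(): always invalidates before
	 * a batch reads, and folds in a write-flush only when one is pending.
	 */
	static int invalidate_all_caches(struct ring *ring)
	{
		uint32_t flush = ring->gpu_caches_dirty ? GPU_DOMAINS : 0;
		int ret;

		ret = ring->flush(ring, GPU_DOMAINS, flush);
		if (ret)
			return ret;

		ring->gpu_caches_dirty = false;
		return 0;
	}

	/* Stand-in for the ring's hardware flush vfunc; just logs its arguments. */
	static int fake_flush(struct ring *ring, uint32_t invalidate, uint32_t flush)
	{
		(void)ring;
		printf("flush(invalidate=%#x, flush=%#x)\n",
		       (unsigned)invalidate, (unsigned)flush);
		return 0;
	}

	int main(void)
	{
		struct ring ring = { .gpu_caches_dirty = true, .flush = fake_flush };

		invalidate_all_caches(&ring); /* dirty: invalidate and flush together */
		flush_all_caches(&ring);      /* caches now clean: returns without flushing */
		return 0;
	}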