mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2025-03-02 21:14:25 +07:00
drm/i915: Remove the now redundant 'obj->ring'
The ring member of the object structure was always updated along with the last_read_seqno member. Thus, with the conversion to last_read_req, obj->ring is now a direct copy of obj->last_read_req->ring. This makes it somewhat redundant and potentially misleading (especially as there was no comment to explain its purpose). This commit removes the redundant field. Many uses were simply testing for non-NULL to see whether the object is active on the GPU. Some of these have been converted to check 'obj->active' instead. Others (where the last_read_req is about to be used anyway) have been changed to check obj->last_read_req. The rest simply pull the ring out of the request structure and proceed as before. For: VIZ-4377 Signed-off-by: John Harrison <John.C.Harrison@Intel.com> Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
parent
1b5a433a4d
commit
41c5241555
@ -166,8 +166,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
|
||||
*t = '\0';
|
||||
seq_printf(m, " (%s mappable)", s);
|
||||
}
|
||||
if (obj->ring != NULL)
|
||||
seq_printf(m, " (%s)", obj->ring->name);
|
||||
if (obj->last_read_req != NULL)
|
||||
seq_printf(m, " (%s)",
|
||||
i915_gem_request_get_ring(obj->last_read_req)->name);
|
||||
if (obj->frontbuffer_bits)
|
||||
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
|
||||
}
|
||||
@ -334,7 +335,7 @@ static int per_file_stats(int id, void *ptr, void *data)
|
||||
if (ppgtt->file_priv != stats->file_priv)
|
||||
continue;
|
||||
|
||||
if (obj->ring) /* XXX per-vma statistic */
|
||||
if (obj->active) /* XXX per-vma statistic */
|
||||
stats->active += obj->base.size;
|
||||
else
|
||||
stats->inactive += obj->base.size;
|
||||
@ -344,7 +345,7 @@ static int per_file_stats(int id, void *ptr, void *data)
|
||||
} else {
|
||||
if (i915_gem_obj_ggtt_bound(obj)) {
|
||||
stats->global += obj->base.size;
|
||||
if (obj->ring)
|
||||
if (obj->active)
|
||||
stats->active += obj->base.size;
|
||||
else
|
||||
stats->inactive += obj->base.size;
|
||||
|
@ -1941,8 +1941,6 @@ struct drm_i915_gem_object {
|
||||
void *dma_buf_vmapping;
|
||||
int vmapping_count;
|
||||
|
||||
struct intel_engine_cs *ring;
|
||||
|
||||
/** Breadcrumb of last rendering to the buffer. */
|
||||
struct drm_i915_gem_request *last_read_req;
|
||||
struct drm_i915_gem_request *last_write_req;
|
||||
|
@ -2263,14 +2263,18 @@ static void
|
||||
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
|
||||
struct intel_engine_cs *ring)
|
||||
{
|
||||
struct drm_i915_gem_request *req = intel_ring_get_request(ring);
|
||||
struct drm_i915_gem_request *req;
|
||||
struct intel_engine_cs *old_ring;
|
||||
|
||||
BUG_ON(ring == NULL);
|
||||
if (obj->ring != ring && obj->last_write_req) {
|
||||
|
||||
req = intel_ring_get_request(ring);
|
||||
old_ring = i915_gem_request_get_ring(obj->last_read_req);
|
||||
|
||||
if (old_ring != ring && obj->last_write_req) {
|
||||
/* Keep the request relative to the current ring */
|
||||
i915_gem_request_assign(&obj->last_write_req, req);
|
||||
}
|
||||
obj->ring = ring;
|
||||
|
||||
/* Add a reference if we're newly entering the active list. */
|
||||
if (!obj->active) {
|
||||
@ -2309,7 +2313,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
|
||||
intel_fb_obj_flush(obj, true);
|
||||
|
||||
list_del_init(&obj->ring_list);
|
||||
obj->ring = NULL;
|
||||
|
||||
i915_gem_request_assign(&obj->last_read_req, NULL);
|
||||
i915_gem_request_assign(&obj->last_write_req, NULL);
|
||||
@ -2326,9 +2329,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
|
||||
static void
|
||||
i915_gem_object_retire(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct intel_engine_cs *ring = obj->ring;
|
||||
|
||||
if (ring == NULL)
|
||||
if (obj->last_read_req == NULL)
|
||||
return;
|
||||
|
||||
if (i915_gem_request_completed(obj->last_read_req, true))
|
||||
@ -2861,14 +2862,17 @@ i915_gem_idle_work_handler(struct work_struct *work)
|
||||
static int
|
||||
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct intel_engine_cs *ring;
|
||||
int ret;
|
||||
|
||||
if (obj->active) {
|
||||
ring = i915_gem_request_get_ring(obj->last_read_req);
|
||||
|
||||
ret = i915_gem_check_olr(obj->last_read_req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_retire_requests_ring(obj->ring);
|
||||
i915_gem_retire_requests_ring(ring);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -2971,10 +2975,12 @@ int
|
||||
i915_gem_object_sync(struct drm_i915_gem_object *obj,
|
||||
struct intel_engine_cs *to)
|
||||
{
|
||||
struct intel_engine_cs *from = obj->ring;
|
||||
struct intel_engine_cs *from;
|
||||
u32 seqno;
|
||||
int ret, idx;
|
||||
|
||||
from = i915_gem_request_get_ring(obj->last_read_req);
|
||||
|
||||
if (from == NULL || to == from)
|
||||
return 0;
|
||||
|
||||
@ -3929,7 +3935,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
|
||||
bool was_pin_display;
|
||||
int ret;
|
||||
|
||||
if (pipelined != obj->ring) {
|
||||
if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
|
||||
ret = i915_gem_object_sync(obj, pipelined);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -4284,9 +4290,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
|
||||
ret = i915_gem_object_flush_active(obj);
|
||||
|
||||
args->busy = obj->active;
|
||||
if (obj->ring) {
|
||||
if (obj->last_read_req) {
|
||||
struct intel_engine_cs *ring;
|
||||
BUILD_BUG_ON(I915_NUM_RINGS > 16);
|
||||
args->busy |= intel_ring_flag(obj->ring) << 16;
|
||||
ring = i915_gem_request_get_ring(obj->last_read_req);
|
||||
args->busy |= intel_ring_flag(ring) << 16;
|
||||
}
|
||||
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
|
@ -619,7 +619,8 @@ static int do_switch(struct intel_engine_cs *ring,
|
||||
* swapped, but there is no way to do that yet.
|
||||
*/
|
||||
from->legacy_hw_ctx.rcs_state->dirty = 1;
|
||||
BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
|
||||
BUG_ON(i915_gem_request_get_ring(
|
||||
from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
|
||||
|
||||
/* obj is kept alive until the next request by its active ref */
|
||||
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
|
||||
|
@ -683,7 +683,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
|
||||
err->dirty = obj->dirty;
|
||||
err->purgeable = obj->madv != I915_MADV_WILLNEED;
|
||||
err->userptr = obj->userptr.mm != NULL;
|
||||
err->ring = obj->ring ? obj->ring->id : -1;
|
||||
err->ring = obj->last_read_req ?
|
||||
i915_gem_request_get_ring(obj->last_read_req)->id : -1;
|
||||
err->cache_level = obj->cache_level;
|
||||
}
|
||||
|
||||
|
@ -9528,7 +9528,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
|
||||
else if (i915.enable_execlists)
|
||||
return true;
|
||||
else
|
||||
return ring != obj->ring;
|
||||
return ring != i915_gem_request_get_ring(obj->last_read_req);
|
||||
}
|
||||
|
||||
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
|
||||
@ -9888,7 +9888,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
||||
} else if (IS_IVYBRIDGE(dev)) {
|
||||
ring = &dev_priv->ring[BCS];
|
||||
} else if (INTEL_INFO(dev)->gen >= 7) {
|
||||
ring = obj->ring;
|
||||
ring = i915_gem_request_get_ring(obj->last_read_req);
|
||||
if (ring == NULL || ring->id != RCS)
|
||||
ring = &dev_priv->ring[BCS];
|
||||
} else {
|
||||
@ -9910,7 +9910,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
||||
|
||||
i915_gem_request_assign(&work->flip_queued_req,
|
||||
obj->last_write_req);
|
||||
work->flip_queued_ring = obj->ring;
|
||||
work->flip_queued_ring =
|
||||
i915_gem_request_get_ring(obj->last_write_req);
|
||||
} else {
|
||||
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
|
||||
page_flip_flags);
|
||||
|
Loading…
Reference in New Issue
Block a user