mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2025-01-22 07:44:54 +07:00
drm/i915: i915_vma_move_to_active prep patch
This patch is broken out of the next one simply to remove the code motion from that patch and make it more readable. What we do here is move i915_vma_move_to_active() to i915_gem_execbuffer.c and put the three stages (read, write, fenced) together, so that future modifications to active handling are all located in the same spot. The importance of this is that it lets us more simply control the order in which requests are placed in the retirement list (i.e. control the order in which we retire, and so control object lifetimes without having to hold onto extra references).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-24-git-send-email-chris@chris-wilson.co.uk
This commit is contained in:
parent 4b8de8e68a
commit 5cf3d28098
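Before the diff, as orientation: below is a minimal, standalone C sketch of the bookkeeping pattern the consolidated function centralizes. All names here (demo_obj, move_to_active, FLAG_WRITE, NUM_ENGINES) are illustrative stand-ins, not the kernel's types; it models only the per-engine read bitmask and the single reference taken on first activation.

    #include <assert.h>
    #include <stdio.h>

    #define NUM_ENGINES 5              /* illustrative engine count */
    #define FLAG_WRITE  (1u << 0)      /* stand-in for EXEC_OBJECT_WRITE */

    struct demo_obj {
            unsigned int active;       /* bitmask: one bit per engine reading */
            unsigned int refcount;
            int dirty;
    };

    /* Models the shape of the new i915_vma_move_to_active(): read, write
     * and fence tracking selected by a flags word, all in one place. */
    static void move_to_active(struct demo_obj *obj, unsigned int engine_id,
                               unsigned int flags)
    {
            assert(engine_id < NUM_ENGINES);

            obj->dirty = 1;                   /* "be paranoid" */

            if (obj->active == 0)             /* newly entering the active list: */
                    obj->refcount++;          /* take exactly one active reference */
            obj->active |= 1u << engine_id;   /* record the read on this engine */

            if (flags & FLAG_WRITE) {
                    /* last-write tracking and domain flushing would go here */
            }
    }

    int main(void)
    {
            struct demo_obj obj = { .refcount = 1 };

            move_to_active(&obj, 0, 0);           /* read on engine 0 */
            move_to_active(&obj, 2, FLAG_WRITE);  /* write on engine 2 */

            assert(obj.refcount == 2);            /* second call took no extra ref */
            printf("active mask: 0x%x\n", obj.active);  /* prints 0x5 */
            return 0;
    }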
drivers/gpu/drm/i915/i915_drv.h
@@ -3153,7 +3153,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_request *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
-                             struct drm_i915_gem_request *req);
+                             struct drm_i915_gem_request *req,
+                             unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                          struct drm_device *dev,
                          struct drm_mode_create_dumb *args);
drivers/gpu/drm/i915/i915_gem.c
@@ -2330,24 +2330,6 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
         return obj->mapping;
 }
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-                             struct drm_i915_gem_request *req)
-{
-        struct drm_i915_gem_object *obj = vma->obj;
-        struct intel_engine_cs *engine;
-
-        engine = i915_gem_request_get_engine(req);
-
-        /* Add a reference if we're newly entering the active list. */
-        if (obj->active == 0)
-                i915_gem_object_get(obj);
-        obj->active |= intel_engine_flag(engine);
-
-        i915_gem_active_set(&obj->last_read[engine->id], req);
-
-        list_move_tail(&vma->vm_link, &vma->vm->active_list);
-}
-
 static void
 i915_gem_object_retire__write(struct i915_gem_active *active,
                               struct drm_i915_gem_request *request)
drivers/gpu/drm/i915/i915_gem_context.c
@@ -816,8 +816,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
          * MI_SET_CONTEXT instead of when the next seqno has completed.
          */
         if (from != NULL) {
-                from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-                i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
+                struct drm_i915_gem_object *obj = from->engine[RCS].state;
+
                 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                  * whole damn pipeline, we don't need to explicitly mark the
                  * object dirty. The only exception is that the context must be
@@ -825,10 +825,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
                  * able to defer doing this until we know the object would be
                  * swapped, but there is no way to do that yet.
                  */
-                from->engine[RCS].state->dirty = 1;
+                obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+                i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
 
                 /* obj is kept alive until the next request by its active ref */
-                i915_gem_object_ggtt_unpin(from->engine[RCS].state);
+                i915_gem_object_ggtt_unpin(obj);
                 i915_gem_context_put(from);
         }
         engine->last_context = i915_gem_context_get(to);
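One subtlety in the two hunks above: the explicit from->engine[RCS].state->dirty = 1 disappears because the consolidated function now marks the object dirty unconditionally. In terms of the hypothetical sketch earlier, a context-switch caller with no write or fence requirements reduces to:

    /* Hypothetical caller mirroring do_rcs_switch() after the patch:
     * flags = 0, and no separate dirty marking -- move_to_active()
     * sets it internally. RCS_ID is an assumed engine index. */
    move_to_active(&obj, RCS_ID, 0);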
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1143,43 +1143,64 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
         return ctx;
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+                             struct drm_i915_gem_request *req,
+                             unsigned int flags)
+{
+        struct drm_i915_gem_object *obj = vma->obj;
+        const unsigned int idx = req->engine->id;
+
+        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+        obj->dirty = 1; /* be paranoid */
+
+        /* Add a reference if we're newly entering the active list. */
+        if (obj->active == 0)
+                i915_gem_object_get(obj);
+        obj->active |= 1 << idx;
+        i915_gem_active_set(&obj->last_read[idx], req);
+
+        if (flags & EXEC_OBJECT_WRITE) {
+                i915_gem_active_set(&obj->last_write, req);
+
+                intel_fb_obj_invalidate(obj, ORIGIN_CS);
+
+                /* update for the implicit flush after a batch */
+                obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+        }
+
+        if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+                i915_gem_active_set(&obj->last_fence, req);
+                if (flags & __EXEC_OBJECT_HAS_FENCE) {
+                        struct drm_i915_private *dev_priv = req->i915;
+
+                        list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+                                       &dev_priv->mm.fence_list);
+                }
+        }
+
+        list_move_tail(&vma->vm_link, &vma->vm->active_list);
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                    struct drm_i915_gem_request *req)
 {
-        struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
         struct i915_vma *vma;
 
         list_for_each_entry(vma, vmas, exec_list) {
-                struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
                 struct drm_i915_gem_object *obj = vma->obj;
                 u32 old_read = obj->base.read_domains;
                 u32 old_write = obj->base.write_domain;
 
-                obj->dirty = 1; /* be paranoid */
                 obj->base.write_domain = obj->base.pending_write_domain;
-                if (obj->base.write_domain == 0)
+                if (obj->base.write_domain)
+                        vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
+                else
                         obj->base.pending_read_domains |= obj->base.read_domains;
                 obj->base.read_domains = obj->base.pending_read_domains;
 
-                i915_vma_move_to_active(vma, req);
-                if (obj->base.write_domain) {
-                        i915_gem_active_set(&obj->last_write, req);
-
-                        intel_fb_obj_invalidate(obj, ORIGIN_CS);
-
-                        /* update for the implicit flush after a batch */
-                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-                }
-                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-                        i915_gem_active_set(&obj->last_fence, req);
-                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-                                struct drm_i915_private *dev_priv = engine->i915;
-                                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
-                                               &dev_priv->mm.fence_list);
-                        }
-                }
-
+                i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
                 trace_i915_gem_object_change_domain(obj, old_read, old_write);
         }
 }
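The rewritten loop folds the write decision into the per-object flags word before making a single consolidated call. A compact model of that derivation, reusing the illustrative types from the sketch above (demo_entry and the pending_write_domain parameter are assumed names, not the kernel's):

    struct demo_entry {
            unsigned int flags;        /* stand-in for exec_entry->flags */
    };

    /* Mirrors the rewritten loop body: a pending GPU write becomes an
     * explicit WRITE flag, and one call then covers read, write and
     * fence tracking together. */
    static void exec_move_to_active(struct demo_obj *obj,
                                    struct demo_entry *entry,
                                    unsigned int engine_id,
                                    unsigned int pending_write_domain)
    {
            if (pending_write_domain)
                    entry->flags |= FLAG_WRITE;

            move_to_active(obj, engine_id, entry->flags);
    }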
drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -217,7 +217,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
                 goto err_unpin;
         }
 
-        i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+        i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req, 0);
 err_unpin:
         i915_gem_object_ggtt_unpin(so.obj);
 err_obj: