2019-02-05 20:00:02 +07:00
|
|
|
/*
|
|
|
|
* SPDX-License-Identifier: MIT
|
|
|
|
*
|
|
|
|
* Copyright © 2019 Intel Corporation
|
|
|
|
*/
|
|
|
|
|
2019-06-22 01:37:58 +07:00
|
|
|
#include <linux/debugobjects.h>
|
|
|
|
|
2019-06-18 14:41:28 +07:00
|
|
|
#include "gt/intel_engine_pm.h"
|
|
|
|
|
2019-02-05 20:00:02 +07:00
|
|
|
#include "i915_drv.h"
|
|
|
|
#include "i915_active.h"
|
2019-03-06 04:38:30 +07:00
|
|
|
#include "i915_globals.h"
|
2019-02-05 20:00:02 +07:00
|
|
|
|
|
|
|
/* The Big Kernel Lock guarding i915_active state: the device struct_mutex */
#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
|
|
|
|
|
2019-02-05 20:00:04 +07:00
|
|
|
/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;	/* hook into the driver's global shrink/exit lists */
	struct kmem_cache *slab_cache;	/* backing store for struct active_node */
} global;
|
|
|
|
|
2019-02-05 20:00:02 +07:00
|
|
|
/*
 * Per-timeline tracking slot within an i915_active. Each node records the
 * last request submitted along one timeline; nodes live in ref->tree,
 * keyed by the timeline (fence context) id.
 */
struct active_node {
	struct i915_active_request base;	/* the request being tracked */
	struct i915_active *ref;		/* backpointer for the retire callback */
	struct rb_node node;			/* linkage in ref->tree */
	u64 timeline;				/* fence context id, the rbtree key */
};
|
|
|
|
|
2019-06-22 01:37:58 +07:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

/*
 * Hint for debugobjects reports: identify the i915_active by its retire
 * callback (which names the owning subsystem), falling back to the
 * object itself when no callback is set.
 */
static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

/* Register @ref with debugobjects upon construction */
static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

/* Mark @ref as active (first reference acquired) */
static void debug_active_activate(struct i915_active *ref)
{
	debug_object_activate(ref, &active_debug_desc);
}

/* Mark @ref as idle again (last reference released) */
static void debug_active_deactivate(struct i915_active *ref)
{
	debug_object_deactivate(ref, &active_debug_desc);
}

/* Tell debugobjects that @ref is being destroyed */
static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

/* Assert that @ref was initialised before use */
static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

/* No-op stubs when debugobjects tracking is compiled out */
static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif
|
|
|
|
|
2019-02-05 20:00:03 +07:00
|
|
|
static void
|
|
|
|
__active_park(struct i915_active *ref)
|
|
|
|
{
|
|
|
|
struct active_node *it, *n;
|
|
|
|
|
|
|
|
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
|
drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better. We
need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
2019-02-05 20:00:05 +07:00
|
|
|
GEM_BUG_ON(i915_active_request_isset(&it->base));
|
2019-02-05 20:00:04 +07:00
|
|
|
kmem_cache_free(global.slab_cache, it);
|
2019-02-05 20:00:03 +07:00
|
|
|
}
|
|
|
|
ref->tree = RB_ROOT;
|
|
|
|
}
|
|
|
|
|
2019-02-05 20:00:02 +07:00
|
|
|
/*
 * Drop one active reference from @ref; upon the final release, reap the
 * now-idle tracker nodes and only then notify the owner via retire().
 */
static void
__active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!ref->count);
	if (--ref->count)
		return;

	debug_active_deactivate(ref);

	/* return the unused nodes to our slabcache */
	__active_park(ref);

	/* The owner may free @ref from its retire callback; touch nothing after */
	ref->retire(ref);
}
|
|
|
|
|
|
|
|
static void
|
drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better. We
need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
2019-02-05 20:00:05 +07:00
|
|
|
node_retire(struct i915_active_request *base, struct i915_request *rq)
|
2019-02-05 20:00:02 +07:00
|
|
|
{
|
|
|
|
__active_retire(container_of(base, struct active_node, base)->ref);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better. We
need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
2019-02-05 20:00:05 +07:00
|
|
|
last_retire(struct i915_active_request *base, struct i915_request *rq)
|
2019-02-05 20:00:02 +07:00
|
|
|
{
|
|
|
|
__active_retire(container_of(base, struct i915_active, last));
|
|
|
|
}
|
|
|
|
|
drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better. We
need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
2019-02-05 20:00:05 +07:00
|
|
|
/*
 * Return the tracking slot to use for timeline @idx, treating the
 * embedded ref->last slot as an MRU cache and spilling its previous
 * occupant into the rbtree when the timeline changes. May allocate
 * (GFP_KERNEL) and so may recurse into the shrinker; returns
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct i915_active_request *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;
	struct i915_request *old;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 *
	 * Note that we allow the timeline to be active simultaneously in
	 * the rbtree and the last cache. We do this to avoid having
	 * to search and replace the rbtree element for a new timeline, with
	 * the cost being that we must be aware that the ref may be retired
	 * twice for the same timeline (as the older rbtree element will be
	 * retired before the new request added to last).
	 */
	old = i915_active_request_raw(&ref->last, BKL(ref));
	if (!old || old->fence.context == idx)
		goto out;

	/* Move the currently active fence into the rbtree */
	idx = old->fence.context;

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto replace;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);

	/* kmalloc may retire the ref->last (thanks shrinker)! */
	if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) {
		/* kmem_cache_free(NULL) is a no-op, safe even on alloc failure */
		kmem_cache_free(global.slab_cache, node);
		goto out;
	}

	if (unlikely(!node))
		return ERR_PTR(-ENOMEM);

	i915_active_request_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

replace:
	/*
	 * Overwrite the previous active slot in the rbtree with last,
	 * leaving last zeroed. If the previous slot is still active,
	 * we must be careful as we now only expect to receive one retire
	 * callback not two, and so must undo the active counting for the
	 * overwritten slot.
	 */
	if (i915_active_request_isset(&node->base)) {
		/* Retire ourselves from the old rq->active_list */
		__list_del_entry(&node->base.link);
		ref->count--;
		GEM_BUG_ON(!ref->count);
	}
	GEM_BUG_ON(list_empty(&ref->last.link));
	list_replace_init(&ref->last.link, &node->base.link);
	node->base.request = fetch_and_zero(&ref->last.request);

out:
	return &ref->last;
}
|
|
|
|
|
|
|
|
void i915_active_init(struct drm_i915_private *i915,
|
|
|
|
struct i915_active *ref,
|
|
|
|
void (*retire)(struct i915_active *ref))
|
|
|
|
{
|
2019-06-22 01:37:58 +07:00
|
|
|
debug_active_init(ref);
|
|
|
|
|
2019-02-05 20:00:02 +07:00
|
|
|
ref->i915 = i915;
|
|
|
|
ref->retire = retire;
|
|
|
|
ref->tree = RB_ROOT;
|
drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better. We
need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
2019-02-05 20:00:05 +07:00
|
|
|
i915_active_request_init(&ref->last, NULL, last_retire);
|
drm/i915: Keep contexts pinned until after the next kernel context switch
We need to keep the context image pinned in memory until after the GPU
has finished writing into it. Since it continues to write as we signal
the final breadcrumb, we need to keep it pinned until the request after
it is complete. Currently we know the order in which requests execute on
each engine, and so to remove that presumption we need to identify a
request/context-switch we know must occur after our completion. Any
request queued after the signal must imply a context switch, for
simplicity we use a fresh request from the kernel context.
The sequence of operations for keeping the context pinned until saved is:
- On context activation, we preallocate a node for each physical engine
the context may operate on. This is to avoid allocations during
unpinning, which may be from inside FS_RECLAIM context (aka the
shrinker)
- On context deactivation on retirement of the last active request (which
is before we know the context has been saved), we add the
preallocated node onto a barrier list on each engine
- On engine idling, we emit a switch to kernel context. When this
switch completes, we know that all previous contexts must have been
saved, and so on retiring this request we can finally unpin all the
contexts that were marked as deactivated prior to the switch.
We can enhance this in future by flushing all the idle contexts on a
regular heartbeat pulse of a switch to kernel context, which will also
be used to check for hung engines.
v2: intel_context_active_acquire/_release
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-1-chris@chris-wilson.co.uk
2019-06-14 23:46:04 +07:00
|
|
|
init_llist_head(&ref->barriers);
|
2019-02-05 20:00:02 +07:00
|
|
|
ref->count = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Track @rq as the most recent activity on @timeline within @ref.
 * A temporary acquire is held around the tree manipulation so that a
 * shrinker recursion from the allocation cannot reap the tree under us.
 * Returns 0 on success or a negative error code.
 */
int i915_active_ref(struct i915_active *ref,
		    u64 timeline,
		    struct i915_request *rq)
{
	struct i915_active_request *active;
	int err = 0;

	/* Prevent reaping in case we malloc/wait while building the tree */
	i915_active_acquire(ref);

	active = active_instance(ref, timeline);
	if (IS_ERR(active)) {
		err = PTR_ERR(active);
		goto out;
	}

	/* Each timeline slot contributes one reference while it stays busy */
	if (!i915_active_request_isset(active))
		ref->count++;
	__i915_active_request_set(active, rq);

	GEM_BUG_ON(!ref->count);
out:
	i915_active_release(ref);
	return err;
}
|
|
|
|
|
|
|
|
bool i915_active_acquire(struct i915_active *ref)
|
|
|
|
{
|
2019-06-22 01:37:58 +07:00
|
|
|
debug_active_assert(ref);
|
2019-02-05 20:00:02 +07:00
|
|
|
lockdep_assert_held(BKL(ref));
|
2019-06-22 01:37:58 +07:00
|
|
|
|
|
|
|
if (ref->count++)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
debug_active_activate(ref);
|
|
|
|
return true;
|
2019-02-05 20:00:02 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Release a reference taken with i915_active_acquire(); the final
 * release reaps the tracker tree and fires the owner's retire()
 * callback. Caller must hold the BKL.
 */
void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	lockdep_assert_held(BKL(ref));

	__active_retire(ref);
}
|
|
|
|
|
|
|
|
/*
 * Wait until all activity tracked by @ref has completed, retiring the
 * cached last slot and then every per-timeline node. Returns 0 on
 * success, or the first error reported while retiring (e.g. -EINTR).
 */
int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int ret = 0;

	if (i915_active_acquire(ref))
		goto out_release; /* was idle; nothing to wait upon */

	ret = i915_active_request_retire(&ref->last, BKL(ref));
	if (ret)
		goto out_release;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		ret = i915_active_request_retire(&it->base, BKL(ref));
		if (ret)
			break;
	}

out_release:
	i915_active_release(ref);
	return ret;
}
|
|
|
|
|
drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better. We
need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
2019-02-05 20:00:05 +07:00
|
|
|
int i915_request_await_active_request(struct i915_request *rq,
|
|
|
|
struct i915_active_request *active)
|
2019-02-05 20:00:02 +07:00
|
|
|
{
|
|
|
|
struct i915_request *barrier =
|
drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better. We
need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
2019-02-05 20:00:05 +07:00
|
|
|
i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
|
2019-02-05 20:00:02 +07:00
|
|
|
|
|
|
|
return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Order @rq after all activity tracked by @ref: first the cached last
 * slot, then every per-timeline node in the rbtree. Returns 0 on
 * success or the first await error.
 */
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	/* await allocates and so we need to avoid hitting the shrinker */
	if (i915_active_acquire(ref))
		goto out; /* was idle */

	err = i915_request_await_active_request(rq, &ref->last);
	if (err)
		goto out;

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = i915_request_await_active_request(rq, &it->base);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}
|
|
|
|
|
2019-02-05 20:00:03 +07:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
/*
 * Debug-build-only teardown check: by the time a tracker is destroyed it
 * must be fully idle — no last request set, an empty node tree, and a
 * zero reference count. Any violation is a missed retire/release.
 */
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(i915_active_request_isset(&ref->last));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	GEM_BUG_ON(ref->count);
}
#endif
|
2019-02-05 20:00:02 +07:00
|
|
|
|
drm/i915: Keep contexts pinned until after the next kernel context switch
We need to keep the context image pinned in memory until after the GPU
has finished writing into it. Since it continues to write as we signal
the final breadcrumb, we need to keep it pinned until the request after
it is complete. Currently we know the order in which requests execute on
each engine, and so to remove that presumption we need to identify a
request/context-switch we know must occur after our completion. Any
request queued after the signal must imply a context switch, for
simplicity we use a fresh request from the kernel context.
The sequence of operations for keeping the context pinned until saved is:
- On context activation, we preallocate a node for each physical engine
the context may operate on. This is to avoid allocations during
unpinning, which may be from inside FS_RECLAIM context (aka the
shrinker)
- On context deactivation on retirement of the last active request (which
is before we know the context has been saved), we add the
preallocated node onto a barrier list on each engine
- On engine idling, we emit a switch to kernel context. When this
switch completes, we know that all previous contexts must have been
saved, and so on retiring this request we can finally unpin all the
contexts that were marked as deactivated prior to the switch.
We can enhance this in future by flushing all the idle contexts on a
regular heartbeat pulse of a switch to kernel context, which will also
be used to check for hung engines.
v2: intel_context_active_acquire/_release
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-1-chris@chris-wilson.co.uk
2019-06-14 23:46:04 +07:00
|
|
|
/*
 * Preallocate one barrier node per physical engine in @engine->mask so
 * that context deactivation (which may run inside FS_RECLAIM, i.e. the
 * shrinker) never has to allocate. Each node is stamped with the kernel
 * context's timeline and parked on ref->barriers until
 * i915_active_acquire_barrier() moves it into the tree.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (with all
 * partially-constructed nodes unwound).
 */
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct llist_node *pos, *next;
	unsigned long tmp;
	int err;

	GEM_BUG_ON(!engine->mask);
	for_each_engine_masked(engine, i915, engine->mask, tmp) {
		struct intel_context *kctx = engine->kernel_context;
		struct active_node *node;

		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
		if (unlikely(!node)) {
			err = -ENOMEM;
			goto unwind;
		}

		/*
		 * Stash the engine pointer in base.request; it is read back
		 * (and replaced) when the barrier is consumed or unwound.
		 */
		i915_active_request_init(&node->base,
					 (void *)engine, node_retire);
		node->timeline = kctx->ring->timeline->fence_context;
		node->ref = ref;
		ref->count++;

		/* Hold the engine awake until the barrier is consumed. */
		intel_engine_pm_get(engine);
		/* Reuse base.link's storage as an llist_node while pending. */
		llist_add((struct llist_node *)&node->base.link,
			  &ref->barriers);
	}

	return 0;

unwind:
	/* Undo every node queued so far: drop its pm ref and free it. */
	llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
		struct active_node *node;

		node = container_of((struct list_head *)pos,
				    typeof(*node), base.link);
		/* Recover the engine stashed in base.request above. */
		engine = (void *)rcu_access_pointer(node->base.request);

		intel_engine_pm_put(engine);
		kmem_cache_free(global.slab_cache, node);
	}

	return err;
}
|
|
|
|
|
|
|
|
/*
 * Consume the barrier nodes preallocated by
 * i915_active_acquire_preallocate_barrier(): insert each into the
 * tracker's timeline rbtree and hand it to its engine's barrier_tasks
 * list, then drop the engine-pm reference taken at preallocation.
 */
void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;

	i915_active_acquire(ref);

	llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
		struct intel_engine_cs *engine;
		struct active_node *node;
		struct rb_node **p, *parent;

		node = container_of((struct list_head *)pos,
				    typeof(*node), base.link);

		/* The engine was stashed in base.request at preallocation. */
		engine = (void *)rcu_access_pointer(node->base.request);
		/* Mark the slot as barrier-pending rather than a request. */
		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));

		/* Ordered insert into the per-timeline rbtree. */
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			parent = *p;
			if (rb_entry(parent,
				     struct active_node,
				     node)->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);

		/* Reuse base.link's storage as an llist_node again. */
		llist_add((struct llist_node *)&node->base.link,
			  &engine->barrier_tasks);
		/* Pairs with intel_engine_pm_get() at preallocation. */
		intel_engine_pm_put(engine);
	}
	i915_active_release(ref);
}
|
|
|
|
|
|
|
|
void i915_request_add_barriers(struct i915_request *rq)
|
|
|
|
{
|
|
|
|
struct intel_engine_cs *engine = rq->engine;
|
|
|
|
struct llist_node *node, *next;
|
|
|
|
|
|
|
|
llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
|
|
|
|
list_add_tail((struct list_head *)node, &rq->active_list);
|
|
|
|
}
|
|
|
|
|
drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better. We
need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
2019-02-05 20:00:05 +07:00
|
|
|
/*
 * Replace the request tracked by @active with @rq, first ordering @rq
 * after the currently tracked request.
 *
 * Returns 0 on success; on error the tracker is left unchanged.
 */
int i915_active_request_set(struct i915_active_request *active,
			    struct i915_request *rq)
{
	int ret;

	/* Must maintain ordering wrt previous active requests */
	ret = i915_request_await_active_request(rq, active);
	if (ret == 0)
		__i915_active_request_set(active, rq);

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Retirement callback for trackers that need no action on retire;
 * deliberately empty.
 */
void i915_active_retire_noop(struct i915_active_request *active,
			     struct i915_request *request)
{
	/* Space left intentionally blank */
}
|
|
|
|
|
2019-02-05 20:00:02 +07:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
|
|
|
#include "selftests/i915_active.c"
|
|
|
|
#endif
|
2019-02-05 20:00:04 +07:00
|
|
|
|
2019-03-06 04:38:30 +07:00
|
|
|
/* i915_global shrink hook: return unused active_node slabs to the system. */
static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}
|
|
|
|
|
2019-03-06 04:38:30 +07:00
|
|
|
/* i915_global exit hook: tear down the active_node slab cache. */
static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}
|
|
|
|
|
2019-03-06 04:38:30 +07:00
|
|
|
/* Registration record hooking this module into the i915_globals machinery. */
static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };
|
|
|
|
|
|
|
|
/*
 * Create the active_node slab cache and register the shrink/exit hooks.
 * Returns 0 on success, -ENOMEM if the cache cannot be created (in which
 * case nothing is registered).
 */
int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
|