Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-16 00:56:49 +07:00)
drm/i915: Split i915_gem_timeline into individual timelines
We need to move to a more flexible timeline that doesn't assume one
fence context per engine, and so allow for a single timeline to be used
across a combination of engines. This means that preallocating a fence
context per engine is now a hindrance, and so we want to introduce the
singular timeline. From the code perspective, this has the notable
advantage of clearing up a lot of murky semantics and some clumsy
pointer chasing.

By splitting the timeline up into a single entity rather than an array
of per-engine timelines, we can realise the goal of the previous patch
of tracking the timeline alongside the ring.

v2: Tweak wait_for_idle to stop the compiler thinking that ret may be
uninitialised.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502163839.3248-2-chris@chris-wilson.co.uk
Parent: 65fcb8064d
Commit: a89d1f921c
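Before the diff, a compact sketch of the data-structure change the message
describes. This is a simplified illustration abbreviated from the structures
in the patch below: field lists are trimmed to what the patch touches, and
the *_old/*_new names exist only to show both shapes side by side.

	/*
	 * Before: one i915_gem_timeline carried a preallocated
	 * intel_timeline per engine, reached by pointer chasing
	 * (engine->timeline pointed into gt.execution_timeline.engine[id]).
	 */
	struct intel_timeline_old {
		u64 fence_context;		/* preallocated per engine */
		u32 seqno;
		spinlock_t lock;
		struct i915_gem_timeline *common;	/* back-pointer */
	};

	struct i915_gem_timeline_old {
		struct list_head link;
		const char *name;
		struct intel_timeline_old engine[I915_NUM_ENGINES];
	};

	/*
	 * After: a single, self-contained timeline. Each engine embeds one
	 * directly (engine->timeline.lock replaces engine->timeline->lock),
	 * and dynamically created timelines are reference counted so a
	 * single timeline can be shared across a combination of engines.
	 */
	struct i915_timeline_new {
		u64 fence_context;	/* one fence context per timeline */
		u32 seqno;
		spinlock_t lock;
		struct list_head link;
		const char *name;
		struct kref kref;	/* i915_timeline_get()/_put() */
	};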
@ -71,11 +71,11 @@ i915-y += i915_cmd_parser.o \
|
|||||||
i915_gem_shrinker.o \
|
i915_gem_shrinker.o \
|
||||||
i915_gem_stolen.o \
|
i915_gem_stolen.o \
|
||||||
i915_gem_tiling.o \
|
i915_gem_tiling.o \
|
||||||
i915_gem_timeline.o \
|
|
||||||
i915_gem_userptr.o \
|
i915_gem_userptr.o \
|
||||||
i915_gemfs.o \
|
i915_gemfs.o \
|
||||||
i915_query.o \
|
i915_query.o \
|
||||||
i915_request.o \
|
i915_request.o \
|
||||||
|
i915_timeline.o \
|
||||||
i915_trace_points.o \
|
i915_trace_points.o \
|
||||||
i915_vma.o \
|
i915_vma.o \
|
||||||
intel_breadcrumbs.o \
|
intel_breadcrumbs.o \
|
||||||
|
@ -72,10 +72,10 @@
|
|||||||
#include "i915_gem_fence_reg.h"
|
#include "i915_gem_fence_reg.h"
|
||||||
#include "i915_gem_object.h"
|
#include "i915_gem_object.h"
|
||||||
#include "i915_gem_gtt.h"
|
#include "i915_gem_gtt.h"
|
||||||
#include "i915_gem_timeline.h"
|
|
||||||
#include "i915_gpu_error.h"
|
#include "i915_gpu_error.h"
|
||||||
#include "i915_request.h"
|
#include "i915_request.h"
|
||||||
#include "i915_scheduler.h"
|
#include "i915_scheduler.h"
|
||||||
|
#include "i915_timeline.h"
|
||||||
#include "i915_vma.h"
|
#include "i915_vma.h"
|
||||||
|
|
||||||
#include "intel_gvt.h"
|
#include "intel_gvt.h"
|
||||||
@ -2059,8 +2059,6 @@ struct drm_i915_private {
|
|||||||
void (*resume)(struct drm_i915_private *);
|
void (*resume)(struct drm_i915_private *);
|
||||||
void (*cleanup_engine)(struct intel_engine_cs *engine);
|
void (*cleanup_engine)(struct intel_engine_cs *engine);
|
||||||
|
|
||||||
struct i915_gem_timeline execution_timeline;
|
|
||||||
struct i915_gem_timeline legacy_timeline;
|
|
||||||
struct list_head timelines;
|
struct list_head timelines;
|
||||||
|
|
||||||
struct list_head active_rings;
|
struct list_head active_rings;
|
||||||
|
@ -162,7 +162,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
|
|||||||
synchronize_irq(i915->drm.irq);
|
synchronize_irq(i915->drm.irq);
|
||||||
|
|
||||||
intel_engines_park(i915);
|
intel_engines_park(i915);
|
||||||
i915_gem_timelines_park(i915);
|
i915_timelines_park(i915);
|
||||||
|
|
||||||
i915_pmu_gt_parked(i915);
|
i915_pmu_gt_parked(i915);
|
||||||
|
|
||||||
@ -2977,8 +2977,8 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
|
|||||||
* extra delay for a recent interrupt is pointless. Hence, we do
|
* extra delay for a recent interrupt is pointless. Hence, we do
|
||||||
* not need an engine->irq_seqno_barrier() before the seqno reads.
|
* not need an engine->irq_seqno_barrier() before the seqno reads.
|
||||||
*/
|
*/
|
||||||
spin_lock_irqsave(&engine->timeline->lock, flags);
|
spin_lock_irqsave(&engine->timeline.lock, flags);
|
||||||
list_for_each_entry(request, &engine->timeline->requests, link) {
|
list_for_each_entry(request, &engine->timeline.requests, link) {
|
||||||
if (__i915_request_completed(request, request->global_seqno))
|
if (__i915_request_completed(request, request->global_seqno))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
@ -2989,7 +2989,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
|
|||||||
active = request;
|
active = request;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&engine->timeline->lock, flags);
|
spin_unlock_irqrestore(&engine->timeline.lock, flags);
|
||||||
|
|
||||||
return active;
|
return active;
|
||||||
}
|
}
|
||||||
@ -3110,15 +3110,15 @@ static void engine_skip_context(struct i915_request *request)
|
|||||||
{
|
{
|
||||||
struct intel_engine_cs *engine = request->engine;
|
struct intel_engine_cs *engine = request->engine;
|
||||||
struct i915_gem_context *hung_ctx = request->ctx;
|
struct i915_gem_context *hung_ctx = request->ctx;
|
||||||
struct intel_timeline *timeline = request->timeline;
|
struct i915_timeline *timeline = request->timeline;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
GEM_BUG_ON(timeline == engine->timeline);
|
GEM_BUG_ON(timeline == &engine->timeline);
|
||||||
|
|
||||||
spin_lock_irqsave(&engine->timeline->lock, flags);
|
spin_lock_irqsave(&engine->timeline.lock, flags);
|
||||||
spin_lock(&timeline->lock);
|
spin_lock(&timeline->lock);
|
||||||
|
|
||||||
list_for_each_entry_continue(request, &engine->timeline->requests, link)
|
list_for_each_entry_continue(request, &engine->timeline.requests, link)
|
||||||
if (request->ctx == hung_ctx)
|
if (request->ctx == hung_ctx)
|
||||||
skip_request(request);
|
skip_request(request);
|
||||||
|
|
||||||
@ -3126,7 +3126,7 @@ static void engine_skip_context(struct i915_request *request)
|
|||||||
skip_request(request);
|
skip_request(request);
|
||||||
|
|
||||||
spin_unlock(&timeline->lock);
|
spin_unlock(&timeline->lock);
|
||||||
spin_unlock_irqrestore(&engine->timeline->lock, flags);
|
spin_unlock_irqrestore(&engine->timeline.lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Returns the request if it was guilty of the hang */
|
/* Returns the request if it was guilty of the hang */
|
||||||
@ -3183,11 +3183,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
|
|||||||
dma_fence_set_error(&request->fence, -EAGAIN);
|
dma_fence_set_error(&request->fence, -EAGAIN);
|
||||||
|
|
||||||
/* Rewind the engine to replay the incomplete rq */
|
/* Rewind the engine to replay the incomplete rq */
|
||||||
spin_lock_irq(&engine->timeline->lock);
|
spin_lock_irq(&engine->timeline.lock);
|
||||||
request = list_prev_entry(request, link);
|
request = list_prev_entry(request, link);
|
||||||
if (&request->link == &engine->timeline->requests)
|
if (&request->link == &engine->timeline.requests)
|
||||||
request = NULL;
|
request = NULL;
|
||||||
spin_unlock_irq(&engine->timeline->lock);
|
spin_unlock_irq(&engine->timeline.lock);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3300,10 +3300,10 @@ static void nop_complete_submit_request(struct i915_request *request)
|
|||||||
request->fence.context, request->fence.seqno);
|
request->fence.context, request->fence.seqno);
|
||||||
dma_fence_set_error(&request->fence, -EIO);
|
dma_fence_set_error(&request->fence, -EIO);
|
||||||
|
|
||||||
spin_lock_irqsave(&request->engine->timeline->lock, flags);
|
spin_lock_irqsave(&request->engine->timeline.lock, flags);
|
||||||
__i915_request_submit(request);
|
__i915_request_submit(request);
|
||||||
intel_engine_init_global_seqno(request->engine, request->global_seqno);
|
intel_engine_init_global_seqno(request->engine, request->global_seqno);
|
||||||
spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
|
spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
void i915_gem_set_wedged(struct drm_i915_private *i915)
|
void i915_gem_set_wedged(struct drm_i915_private *i915)
|
||||||
@ -3372,10 +3372,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
|
|||||||
* (lockless) lookup doesn't try and wait upon the request as we
|
* (lockless) lookup doesn't try and wait upon the request as we
|
||||||
* reset it.
|
* reset it.
|
||||||
*/
|
*/
|
||||||
spin_lock_irqsave(&engine->timeline->lock, flags);
|
spin_lock_irqsave(&engine->timeline.lock, flags);
|
||||||
intel_engine_init_global_seqno(engine,
|
intel_engine_init_global_seqno(engine,
|
||||||
intel_engine_last_submit(engine));
|
intel_engine_last_submit(engine));
|
||||||
spin_unlock_irqrestore(&engine->timeline->lock, flags);
|
spin_unlock_irqrestore(&engine->timeline.lock, flags);
|
||||||
|
|
||||||
i915_gem_reset_finish_engine(engine);
|
i915_gem_reset_finish_engine(engine);
|
||||||
}
|
}
|
||||||
@ -3387,8 +3387,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
|
|||||||
|
|
||||||
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
|
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
|
||||||
{
|
{
|
||||||
struct i915_gem_timeline *tl;
|
struct i915_timeline *tl;
|
||||||
int i;
|
|
||||||
|
|
||||||
lockdep_assert_held(&i915->drm.struct_mutex);
|
lockdep_assert_held(&i915->drm.struct_mutex);
|
||||||
if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
|
if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
|
||||||
@ -3407,29 +3406,27 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
|
|||||||
* No more can be submitted until we reset the wedged bit.
|
* No more can be submitted until we reset the wedged bit.
|
||||||
*/
|
*/
|
||||||
list_for_each_entry(tl, &i915->gt.timelines, link) {
|
list_for_each_entry(tl, &i915->gt.timelines, link) {
|
||||||
for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
|
struct i915_request *rq;
|
||||||
struct i915_request *rq;
|
|
||||||
|
|
||||||
rq = i915_gem_active_peek(&tl->engine[i].last_request,
|
rq = i915_gem_active_peek(&tl->last_request,
|
||||||
&i915->drm.struct_mutex);
|
&i915->drm.struct_mutex);
|
||||||
if (!rq)
|
if (!rq)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We can't use our normal waiter as we want to
|
* We can't use our normal waiter as we want to
|
||||||
* avoid recursively trying to handle the current
|
* avoid recursively trying to handle the current
|
||||||
* reset. The basic dma_fence_default_wait() installs
|
* reset. The basic dma_fence_default_wait() installs
|
||||||
* a callback for dma_fence_signal(), which is
|
* a callback for dma_fence_signal(), which is
|
||||||
* triggered by our nop handler (indirectly, the
|
* triggered by our nop handler (indirectly, the
|
||||||
* callback enables the signaler thread which is
|
* callback enables the signaler thread which is
|
||||||
* woken by the nop_submit_request() advancing the seqno
|
* woken by the nop_submit_request() advancing the seqno
|
||||||
* and when the seqno passes the fence, the signaler
|
* and when the seqno passes the fence, the signaler
|
||||||
* then signals the fence waking us up).
|
* then signals the fence waking us up).
|
||||||
*/
|
*/
|
||||||
if (dma_fence_default_wait(&rq->fence, true,
|
if (dma_fence_default_wait(&rq->fence, true,
|
||||||
MAX_SCHEDULE_TIMEOUT) < 0)
|
MAX_SCHEDULE_TIMEOUT) < 0)
|
||||||
return false;
|
return false;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
i915_retire_requests(i915);
|
i915_retire_requests(i915);
|
||||||
GEM_BUG_ON(i915->gt.active_requests);
|
GEM_BUG_ON(i915->gt.active_requests);
|
||||||
@ -3734,17 +3731,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
|
static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
|
||||||
{
|
{
|
||||||
int ret, i;
|
return i915_gem_active_wait(&tl->last_request, flags);
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
|
|
||||||
ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int wait_for_engines(struct drm_i915_private *i915)
|
static int wait_for_engines(struct drm_i915_private *i915)
|
||||||
@ -3762,30 +3751,37 @@ static int wait_for_engines(struct drm_i915_private *i915)
|
|||||||
|
|
||||||
int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
|
int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
|
||||||
{
|
{
|
||||||
int ret;
|
|
||||||
|
|
||||||
/* If the device is asleep, we have no requests outstanding */
|
/* If the device is asleep, we have no requests outstanding */
|
||||||
if (!READ_ONCE(i915->gt.awake))
|
if (!READ_ONCE(i915->gt.awake))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (flags & I915_WAIT_LOCKED) {
|
if (flags & I915_WAIT_LOCKED) {
|
||||||
struct i915_gem_timeline *tl;
|
struct i915_timeline *tl;
|
||||||
|
int err;
|
||||||
|
|
||||||
lockdep_assert_held(&i915->drm.struct_mutex);
|
lockdep_assert_held(&i915->drm.struct_mutex);
|
||||||
|
|
||||||
list_for_each_entry(tl, &i915->gt.timelines, link) {
|
list_for_each_entry(tl, &i915->gt.timelines, link) {
|
||||||
ret = wait_for_timeline(tl, flags);
|
err = wait_for_timeline(tl, flags);
|
||||||
if (ret)
|
if (err)
|
||||||
return ret;
|
return err;
|
||||||
}
|
}
|
||||||
i915_retire_requests(i915);
|
i915_retire_requests(i915);
|
||||||
|
|
||||||
ret = wait_for_engines(i915);
|
return wait_for_engines(i915);
|
||||||
} else {
|
} else {
|
||||||
ret = wait_for_timeline(&i915->gt.execution_timeline, flags);
|
struct intel_engine_cs *engine;
|
||||||
}
|
enum intel_engine_id id;
|
||||||
|
int err;
|
||||||
|
|
||||||
return ret;
|
for_each_engine(engine, i915, id) {
|
||||||
|
err = wait_for_timeline(&engine->timeline, flags);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
|
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
|
||||||
@ -4954,7 +4950,7 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
|
|||||||
enum intel_engine_id id;
|
enum intel_engine_id id;
|
||||||
|
|
||||||
for_each_engine(engine, i915, id) {
|
for_each_engine(engine, i915, id) {
|
||||||
GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
|
GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
|
||||||
GEM_BUG_ON(engine->last_retired_context != kernel_context);
|
GEM_BUG_ON(engine->last_retired_context != kernel_context);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -5603,12 +5599,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
|
|||||||
INIT_LIST_HEAD(&dev_priv->gt.timelines);
|
INIT_LIST_HEAD(&dev_priv->gt.timelines);
|
||||||
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
|
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
|
||||||
|
|
||||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
|
||||||
err = i915_gem_timeline_init__global(dev_priv);
|
|
||||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
|
||||||
if (err)
|
|
||||||
goto err_priorities;
|
|
||||||
|
|
||||||
i915_gem_init__mm(dev_priv);
|
i915_gem_init__mm(dev_priv);
|
||||||
|
|
||||||
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
|
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
|
||||||
@ -5628,8 +5618,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
|
|||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_priorities:
|
|
||||||
kmem_cache_destroy(dev_priv->priorities);
|
|
||||||
err_dependencies:
|
err_dependencies:
|
||||||
kmem_cache_destroy(dev_priv->dependencies);
|
kmem_cache_destroy(dev_priv->dependencies);
|
||||||
err_requests:
|
err_requests:
|
||||||
@ -5650,12 +5638,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
|
|||||||
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
|
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
|
||||||
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
|
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
|
||||||
WARN_ON(dev_priv->mm.object_count);
|
WARN_ON(dev_priv->mm.object_count);
|
||||||
|
|
||||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
|
||||||
i915_gem_timeline_fini(&dev_priv->gt.legacy_timeline);
|
|
||||||
i915_gem_timeline_fini(&dev_priv->gt.execution_timeline);
|
|
||||||
WARN_ON(!list_empty(&dev_priv->gt.timelines));
|
WARN_ON(!list_empty(&dev_priv->gt.timelines));
|
||||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
|
||||||
|
|
||||||
kmem_cache_destroy(dev_priv->priorities);
|
kmem_cache_destroy(dev_priv->priorities);
|
||||||
kmem_cache_destroy(dev_priv->dependencies);
|
kmem_cache_destroy(dev_priv->dependencies);
|
||||||
|
@ -122,7 +122,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
|
|||||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||||
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
|
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
|
||||||
|
|
||||||
i915_gem_timeline_free(ctx->timeline);
|
|
||||||
i915_ppgtt_put(ctx->ppgtt);
|
i915_ppgtt_put(ctx->ppgtt);
|
||||||
|
|
||||||
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
|
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
|
||||||
@ -377,18 +376,6 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
|
|||||||
ctx->desc_template = default_desc_template(dev_priv, ppgtt);
|
ctx->desc_template = default_desc_template(dev_priv, ppgtt);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (HAS_EXECLISTS(dev_priv)) {
|
|
||||||
struct i915_gem_timeline *timeline;
|
|
||||||
|
|
||||||
timeline = i915_gem_timeline_create(dev_priv, ctx->name);
|
|
||||||
if (IS_ERR(timeline)) {
|
|
||||||
__destroy_hw_context(ctx, file_priv);
|
|
||||||
return ERR_CAST(timeline);
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx->timeline = timeline;
|
|
||||||
}
|
|
||||||
|
|
||||||
trace_i915_context_create(ctx);
|
trace_i915_context_create(ctx);
|
||||||
|
|
||||||
return ctx;
|
return ctx;
|
||||||
@ -590,19 +577,29 @@ void i915_gem_context_close(struct drm_file *file)
|
|||||||
idr_destroy(&file_priv->context_idr);
|
idr_destroy(&file_priv->context_idr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static struct i915_request *
|
||||||
|
last_request_on_engine(struct i915_timeline *timeline,
|
||||||
|
struct intel_engine_cs *engine)
|
||||||
|
{
|
||||||
|
struct i915_request *rq;
|
||||||
|
|
||||||
|
if (timeline == &engine->timeline)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
rq = i915_gem_active_raw(&timeline->last_request,
|
||||||
|
&engine->i915->drm.struct_mutex);
|
||||||
|
if (rq && rq->engine == engine)
|
||||||
|
return rq;
|
||||||
|
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
|
static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
|
||||||
{
|
{
|
||||||
struct i915_gem_timeline *timeline;
|
struct i915_timeline *timeline;
|
||||||
|
|
||||||
list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
|
list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
|
||||||
struct intel_timeline *tl;
|
if (last_request_on_engine(timeline, engine))
|
||||||
|
|
||||||
if (timeline == &engine->i915->gt.execution_timeline)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
tl = &timeline->engine[engine->id];
|
|
||||||
if (i915_gem_active_peek(&tl->last_request,
|
|
||||||
&engine->i915->drm.struct_mutex))
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -612,7 +609,7 @@ static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
|
|||||||
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
|
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
|
||||||
{
|
{
|
||||||
struct intel_engine_cs *engine;
|
struct intel_engine_cs *engine;
|
||||||
struct i915_gem_timeline *timeline;
|
struct i915_timeline *timeline;
|
||||||
enum intel_engine_id id;
|
enum intel_engine_id id;
|
||||||
|
|
||||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||||
@ -632,11 +629,8 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
|
|||||||
/* Queue this switch after all other activity */
|
/* Queue this switch after all other activity */
|
||||||
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
|
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
|
||||||
struct i915_request *prev;
|
struct i915_request *prev;
|
||||||
struct intel_timeline *tl;
|
|
||||||
|
|
||||||
tl = &timeline->engine[engine->id];
|
prev = last_request_on_engine(timeline, engine);
|
||||||
prev = i915_gem_active_raw(&tl->last_request,
|
|
||||||
&dev_priv->drm.struct_mutex);
|
|
||||||
if (prev)
|
if (prev)
|
||||||
i915_sw_fence_await_sw_fence_gfp(&rq->submit,
|
i915_sw_fence_await_sw_fence_gfp(&rq->submit,
|
||||||
&prev->submit,
|
&prev->submit,
|
||||||
|
@ -58,8 +58,6 @@ struct i915_gem_context {
|
|||||||
/** file_priv: owning file descriptor */
|
/** file_priv: owning file descriptor */
|
||||||
struct drm_i915_file_private *file_priv;
|
struct drm_i915_file_private *file_priv;
|
||||||
|
|
||||||
struct i915_gem_timeline *timeline;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @ppgtt: unique address space (GTT)
|
* @ppgtt: unique address space (GTT)
|
||||||
*
|
*
|
||||||
|
@ -38,10 +38,9 @@
|
|||||||
#include <linux/mm.h>
|
#include <linux/mm.h>
|
||||||
#include <linux/pagevec.h>
|
#include <linux/pagevec.h>
|
||||||
|
|
||||||
#include "i915_gem_timeline.h"
|
|
||||||
|
|
||||||
#include "i915_request.h"
|
#include "i915_request.h"
|
||||||
#include "i915_selftest.h"
|
#include "i915_selftest.h"
|
||||||
|
#include "i915_timeline.h"
|
||||||
|
|
||||||
#define I915_GTT_PAGE_SIZE_4K BIT(12)
|
#define I915_GTT_PAGE_SIZE_4K BIT(12)
|
||||||
#define I915_GTT_PAGE_SIZE_64K BIT(16)
|
#define I915_GTT_PAGE_SIZE_64K BIT(16)
|
||||||
|
@ -1,198 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright © 2016 Intel Corporation
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
|
||||||
* to deal in the Software without restriction, including without limitation
|
|
||||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
||||||
* and/or sell copies of the Software, and to permit persons to whom the
|
|
||||||
* Software is furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice (including the next
|
|
||||||
* paragraph) shall be included in all copies or substantial portions of the
|
|
||||||
* Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
||||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
||||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
||||||
* IN THE SOFTWARE.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "i915_drv.h"
|
|
||||||
#include "i915_syncmap.h"
|
|
||||||
|
|
||||||
static void __intel_timeline_init(struct intel_timeline *tl,
|
|
||||||
struct i915_gem_timeline *parent,
|
|
||||||
u64 context,
|
|
||||||
struct lock_class_key *lockclass,
|
|
||||||
const char *lockname)
|
|
||||||
{
|
|
||||||
tl->fence_context = context;
|
|
||||||
tl->common = parent;
|
|
||||||
spin_lock_init(&tl->lock);
|
|
||||||
lockdep_set_class_and_name(&tl->lock, lockclass, lockname);
|
|
||||||
init_request_active(&tl->last_request, NULL);
|
|
||||||
INIT_LIST_HEAD(&tl->requests);
|
|
||||||
i915_syncmap_init(&tl->sync);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __intel_timeline_fini(struct intel_timeline *tl)
|
|
||||||
{
|
|
||||||
GEM_BUG_ON(!list_empty(&tl->requests));
|
|
||||||
|
|
||||||
i915_syncmap_free(&tl->sync);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int __i915_gem_timeline_init(struct drm_i915_private *i915,
|
|
||||||
struct i915_gem_timeline *timeline,
|
|
||||||
const char *name,
|
|
||||||
struct lock_class_key *lockclass,
|
|
||||||
const char *lockname)
|
|
||||||
{
|
|
||||||
unsigned int i;
|
|
||||||
u64 fences;
|
|
||||||
|
|
||||||
lockdep_assert_held(&i915->drm.struct_mutex);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Ideally we want a set of engines on a single leaf as we expect
|
|
||||||
* to mostly be tracking synchronisation between engines. It is not
|
|
||||||
* a huge issue if this is not the case, but we may want to mitigate
|
|
||||||
* any page crossing penalties if they become an issue.
|
|
||||||
*/
|
|
||||||
BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
|
|
||||||
|
|
||||||
timeline->i915 = i915;
|
|
||||||
timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
|
|
||||||
if (!timeline->name)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
list_add(&timeline->link, &i915->gt.timelines);
|
|
||||||
|
|
||||||
/* Called during early_init before we know how many engines there are */
|
|
||||||
fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
|
|
||||||
for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
|
|
||||||
__intel_timeline_init(&timeline->engine[i],
|
|
||||||
timeline, fences++,
|
|
||||||
lockclass, lockname);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int i915_gem_timeline_init(struct drm_i915_private *i915,
|
|
||||||
struct i915_gem_timeline *timeline,
|
|
||||||
const char *name)
|
|
||||||
{
|
|
||||||
static struct lock_class_key class;
|
|
||||||
|
|
||||||
return __i915_gem_timeline_init(i915, timeline, name,
|
|
||||||
&class, "&timeline->lock");
|
|
||||||
}
|
|
||||||
|
|
||||||
int i915_gem_timeline_init__global(struct drm_i915_private *i915)
|
|
||||||
{
|
|
||||||
static struct lock_class_key class1, class2;
|
|
||||||
int err;
|
|
||||||
|
|
||||||
err = __i915_gem_timeline_init(i915,
|
|
||||||
&i915->gt.execution_timeline,
|
|
||||||
"[execution]", &class1,
|
|
||||||
"i915_execution_timeline");
|
|
||||||
if (err)
|
|
||||||
return err;
|
|
||||||
|
|
||||||
err = __i915_gem_timeline_init(i915,
|
|
||||||
&i915->gt.legacy_timeline,
|
|
||||||
"[global]", &class2,
|
|
||||||
"i915_global_timeline");
|
|
||||||
if (err)
|
|
||||||
goto err_exec_timeline;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
err_exec_timeline:
|
|
||||||
i915_gem_timeline_fini(&i915->gt.execution_timeline);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* i915_gem_timelines_park - called when the driver idles
|
|
||||||
* @i915: the drm_i915_private device
|
|
||||||
*
|
|
||||||
* When the driver is completely idle, we know that all of our sync points
|
|
||||||
* have been signaled and our tracking is then entirely redundant. Any request
|
|
||||||
* to wait upon an older sync point will be completed instantly as we know
|
|
||||||
* the fence is signaled and therefore we will not even look them up in the
|
|
||||||
* sync point map.
|
|
||||||
*/
|
|
||||||
void i915_gem_timelines_park(struct drm_i915_private *i915)
|
|
||||||
{
|
|
||||||
struct i915_gem_timeline *timeline;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
lockdep_assert_held(&i915->drm.struct_mutex);
|
|
||||||
|
|
||||||
list_for_each_entry(timeline, &i915->gt.timelines, link) {
|
|
||||||
for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
|
|
||||||
struct intel_timeline *tl = &timeline->engine[i];
|
|
||||||
|
|
||||||
/*
|
|
||||||
* All known fences are completed so we can scrap
|
|
||||||
* the current sync point tracking and start afresh,
|
|
||||||
* any attempt to wait upon a previous sync point
|
|
||||||
* will be skipped as the fence was signaled.
|
|
||||||
*/
|
|
||||||
i915_syncmap_free(&tl->sync);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
|
|
||||||
lockdep_assert_held(&timeline->i915->drm.struct_mutex);
|
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
|
|
||||||
__intel_timeline_fini(&timeline->engine[i]);
|
|
||||||
|
|
||||||
list_del(&timeline->link);
|
|
||||||
kfree(timeline->name);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct i915_gem_timeline *
|
|
||||||
i915_gem_timeline_create(struct drm_i915_private *i915, const char *name)
|
|
||||||
{
|
|
||||||
struct i915_gem_timeline *timeline;
|
|
||||||
int err;
|
|
||||||
|
|
||||||
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
|
|
||||||
if (!timeline)
|
|
||||||
return ERR_PTR(-ENOMEM);
|
|
||||||
|
|
||||||
err = i915_gem_timeline_init(i915, timeline, name);
|
|
||||||
if (err) {
|
|
||||||
kfree(timeline);
|
|
||||||
return ERR_PTR(err);
|
|
||||||
}
|
|
||||||
|
|
||||||
return timeline;
|
|
||||||
}
|
|
||||||
|
|
||||||
void i915_gem_timeline_free(struct i915_gem_timeline *timeline)
|
|
||||||
{
|
|
||||||
if (!timeline)
|
|
||||||
return;
|
|
||||||
|
|
||||||
i915_gem_timeline_fini(timeline);
|
|
||||||
kfree(timeline);
|
|
||||||
}
|
|
||||||
|
|
||||||
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
|
||||||
#include "selftests/mock_timeline.c"
|
|
||||||
#include "selftests/i915_gem_timeline.c"
|
|
||||||
#endif
|
|
@ -1310,7 +1310,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
|
|||||||
|
|
||||||
count = 0;
|
count = 0;
|
||||||
request = first;
|
request = first;
|
||||||
list_for_each_entry_from(request, &engine->timeline->requests, link)
|
list_for_each_entry_from(request, &engine->timeline.requests, link)
|
||||||
count++;
|
count++;
|
||||||
if (!count)
|
if (!count)
|
||||||
return;
|
return;
|
||||||
@ -1323,7 +1323,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
|
|||||||
|
|
||||||
count = 0;
|
count = 0;
|
||||||
request = first;
|
request = first;
|
||||||
list_for_each_entry_from(request, &engine->timeline->requests, link) {
|
list_for_each_entry_from(request, &engine->timeline.requests, link) {
|
||||||
if (count >= ee->num_requests) {
|
if (count >= ee->num_requests) {
|
||||||
/*
|
/*
|
||||||
* If the ring request list was changed in
|
* If the ring request list was changed in
|
||||||
|
@ -1695,7 +1695,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
|
|||||||
const struct i915_oa_config *oa_config)
|
const struct i915_oa_config *oa_config)
|
||||||
{
|
{
|
||||||
struct intel_engine_cs *engine = dev_priv->engine[RCS];
|
struct intel_engine_cs *engine = dev_priv->engine[RCS];
|
||||||
struct i915_gem_timeline *timeline;
|
struct i915_timeline *timeline;
|
||||||
struct i915_request *rq;
|
struct i915_request *rq;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
@ -1716,15 +1716,11 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
|
|||||||
/* Queue this switch after all other activity */
|
/* Queue this switch after all other activity */
|
||||||
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
|
list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
|
||||||
struct i915_request *prev;
|
struct i915_request *prev;
|
||||||
struct intel_timeline *tl;
|
|
||||||
|
|
||||||
tl = &timeline->engine[engine->id];
|
prev = i915_gem_active_raw(&timeline->last_request,
|
||||||
prev = i915_gem_active_raw(&tl->last_request,
|
|
||||||
&dev_priv->drm.struct_mutex);
|
&dev_priv->drm.struct_mutex);
|
||||||
if (prev)
|
if (prev)
|
||||||
i915_sw_fence_await_sw_fence_gfp(&rq->submit,
|
i915_request_await_dma_fence(rq, &prev->fence);
|
||||||
&prev->submit,
|
|
||||||
GFP_KERNEL);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
i915_request_add(rq);
|
i915_request_add(rq);
|
||||||
|
@ -49,7 +49,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
|
|||||||
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
|
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
|
||||||
return "signaled";
|
return "signaled";
|
||||||
|
|
||||||
return to_request(fence)->timeline->common->name;
|
return to_request(fence)->timeline->name;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool i915_fence_signaled(struct dma_fence *fence)
|
static bool i915_fence_signaled(struct dma_fence *fence)
|
||||||
@ -199,6 +199,7 @@ i915_sched_node_init(struct i915_sched_node *node)
|
|||||||
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
|
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
|
||||||
{
|
{
|
||||||
struct intel_engine_cs *engine;
|
struct intel_engine_cs *engine;
|
||||||
|
struct i915_timeline *timeline;
|
||||||
enum intel_engine_id id;
|
enum intel_engine_id id;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
@ -213,16 +214,13 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
|
|||||||
|
|
||||||
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
|
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
|
||||||
for_each_engine(engine, i915, id) {
|
for_each_engine(engine, i915, id) {
|
||||||
struct i915_gem_timeline *timeline;
|
|
||||||
struct intel_timeline *tl = engine->timeline;
|
|
||||||
|
|
||||||
GEM_TRACE("%s seqno %d (current %d) -> %d\n",
|
GEM_TRACE("%s seqno %d (current %d) -> %d\n",
|
||||||
engine->name,
|
engine->name,
|
||||||
tl->seqno,
|
engine->timeline.seqno,
|
||||||
intel_engine_get_seqno(engine),
|
intel_engine_get_seqno(engine),
|
||||||
seqno);
|
seqno);
|
||||||
|
|
||||||
if (!i915_seqno_passed(seqno, tl->seqno)) {
|
if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
|
||||||
/* Flush any waiters before we reuse the seqno */
|
/* Flush any waiters before we reuse the seqno */
|
||||||
intel_engine_disarm_breadcrumbs(engine);
|
intel_engine_disarm_breadcrumbs(engine);
|
||||||
GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
|
GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
|
||||||
@ -230,18 +228,18 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
|
|||||||
|
|
||||||
/* Check we are idle before we fiddle with hw state! */
|
/* Check we are idle before we fiddle with hw state! */
|
||||||
GEM_BUG_ON(!intel_engine_is_idle(engine));
|
GEM_BUG_ON(!intel_engine_is_idle(engine));
|
||||||
GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
|
GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
|
||||||
|
|
||||||
/* Finally reset hw state */
|
/* Finally reset hw state */
|
||||||
intel_engine_init_global_seqno(engine, seqno);
|
intel_engine_init_global_seqno(engine, seqno);
|
||||||
tl->seqno = seqno;
|
engine->timeline.seqno = seqno;
|
||||||
|
|
||||||
list_for_each_entry(timeline, &i915->gt.timelines, link)
|
|
||||||
memset(timeline->engine[id].global_sync, 0,
|
|
||||||
sizeof(timeline->engine[id].global_sync));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
list_for_each_entry(timeline, &i915->gt.timelines, link)
|
||||||
|
memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
|
||||||
|
|
||||||
i915->gt.request_serial = seqno;
|
i915->gt.request_serial = seqno;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -357,10 +355,10 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
|
|||||||
|
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
|
|
||||||
spin_lock(&engine->timeline->lock);
|
spin_lock(&engine->timeline.lock);
|
||||||
GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline->requests));
|
GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
|
||||||
list_del_init(&rq->link);
|
list_del_init(&rq->link);
|
||||||
spin_unlock(&engine->timeline->lock);
|
spin_unlock(&engine->timeline.lock);
|
||||||
|
|
||||||
spin_lock(&rq->lock);
|
spin_lock(&rq->lock);
|
||||||
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
|
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
|
||||||
@ -397,7 +395,7 @@ static void __retire_engine_upto(struct intel_engine_cs *engine,
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
tmp = list_first_entry(&engine->timeline->requests,
|
tmp = list_first_entry(&engine->timeline.requests,
|
||||||
typeof(*tmp), link);
|
typeof(*tmp), link);
|
||||||
|
|
||||||
GEM_BUG_ON(tmp->engine != engine);
|
GEM_BUG_ON(tmp->engine != engine);
|
||||||
@ -492,16 +490,16 @@ void i915_request_retire_upto(struct i915_request *rq)
|
|||||||
} while (tmp != rq);
|
} while (tmp != rq);
|
||||||
}
|
}
|
||||||
|
|
||||||
static u32 timeline_get_seqno(struct intel_timeline *tl)
|
static u32 timeline_get_seqno(struct i915_timeline *tl)
|
||||||
{
|
{
|
||||||
return ++tl->seqno;
|
return ++tl->seqno;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void move_to_timeline(struct i915_request *request,
|
static void move_to_timeline(struct i915_request *request,
|
||||||
struct intel_timeline *timeline)
|
struct i915_timeline *timeline)
|
||||||
{
|
{
|
||||||
GEM_BUG_ON(request->timeline == request->engine->timeline);
|
GEM_BUG_ON(request->timeline == &request->engine->timeline);
|
||||||
lockdep_assert_held(&request->engine->timeline->lock);
|
lockdep_assert_held(&request->engine->timeline.lock);
|
||||||
|
|
||||||
spin_lock(&request->timeline->lock);
|
spin_lock(&request->timeline->lock);
|
||||||
list_move_tail(&request->link, &timeline->requests);
|
list_move_tail(&request->link, &timeline->requests);
|
||||||
@ -516,15 +514,15 @@ void __i915_request_submit(struct i915_request *request)
|
|||||||
GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
|
GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
|
||||||
engine->name,
|
engine->name,
|
||||||
request->fence.context, request->fence.seqno,
|
request->fence.context, request->fence.seqno,
|
||||||
engine->timeline->seqno + 1,
|
engine->timeline.seqno + 1,
|
||||||
intel_engine_get_seqno(engine));
|
intel_engine_get_seqno(engine));
|
||||||
|
|
||||||
GEM_BUG_ON(!irqs_disabled());
|
GEM_BUG_ON(!irqs_disabled());
|
||||||
lockdep_assert_held(&engine->timeline->lock);
|
lockdep_assert_held(&engine->timeline.lock);
|
||||||
|
|
||||||
GEM_BUG_ON(request->global_seqno);
|
GEM_BUG_ON(request->global_seqno);
|
||||||
|
|
||||||
seqno = timeline_get_seqno(engine->timeline);
|
seqno = timeline_get_seqno(&engine->timeline);
|
||||||
GEM_BUG_ON(!seqno);
|
GEM_BUG_ON(!seqno);
|
||||||
GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
|
GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
|
||||||
|
|
||||||
@ -539,7 +537,7 @@ void __i915_request_submit(struct i915_request *request)
|
|||||||
request->ring->vaddr + request->postfix);
|
request->ring->vaddr + request->postfix);
|
||||||
|
|
||||||
/* Transfer from per-context onto the global per-engine timeline */
|
/* Transfer from per-context onto the global per-engine timeline */
|
||||||
move_to_timeline(request, engine->timeline);
|
move_to_timeline(request, &engine->timeline);
|
||||||
|
|
||||||
trace_i915_request_execute(request);
|
trace_i915_request_execute(request);
|
||||||
|
|
||||||
@ -552,11 +550,11 @@ void i915_request_submit(struct i915_request *request)
|
|||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
/* Will be called from irq-context when using foreign fences. */
|
/* Will be called from irq-context when using foreign fences. */
|
||||||
spin_lock_irqsave(&engine->timeline->lock, flags);
|
spin_lock_irqsave(&engine->timeline.lock, flags);
|
||||||
|
|
||||||
__i915_request_submit(request);
|
__i915_request_submit(request);
|
||||||
|
|
||||||
spin_unlock_irqrestore(&engine->timeline->lock, flags);
|
spin_unlock_irqrestore(&engine->timeline.lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
void __i915_request_unsubmit(struct i915_request *request)
|
void __i915_request_unsubmit(struct i915_request *request)
|
||||||
@ -570,17 +568,17 @@ void __i915_request_unsubmit(struct i915_request *request)
|
|||||||
intel_engine_get_seqno(engine));
|
intel_engine_get_seqno(engine));
|
||||||
|
|
||||||
GEM_BUG_ON(!irqs_disabled());
|
GEM_BUG_ON(!irqs_disabled());
|
||||||
lockdep_assert_held(&engine->timeline->lock);
|
lockdep_assert_held(&engine->timeline.lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Only unwind in reverse order, required so that the per-context list
|
* Only unwind in reverse order, required so that the per-context list
|
||||||
* is kept in seqno/ring order.
|
* is kept in seqno/ring order.
|
||||||
*/
|
*/
|
||||||
GEM_BUG_ON(!request->global_seqno);
|
GEM_BUG_ON(!request->global_seqno);
|
||||||
GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
|
GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
|
||||||
GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
|
GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
|
||||||
request->global_seqno));
|
request->global_seqno));
|
||||||
engine->timeline->seqno--;
|
engine->timeline.seqno--;
|
||||||
|
|
||||||
/* We may be recursing from the signal callback of another i915 fence */
|
/* We may be recursing from the signal callback of another i915 fence */
|
||||||
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
|
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
|
||||||
@ -607,11 +605,11 @@ void i915_request_unsubmit(struct i915_request *request)
|
|||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
/* Will be called from irq-context when using foreign fences. */
|
/* Will be called from irq-context when using foreign fences. */
|
||||||
spin_lock_irqsave(&engine->timeline->lock, flags);
|
spin_lock_irqsave(&engine->timeline.lock, flags);
|
||||||
|
|
||||||
__i915_request_unsubmit(request);
|
__i915_request_unsubmit(request);
|
||||||
|
|
||||||
spin_unlock_irqrestore(&engine->timeline->lock, flags);
|
spin_unlock_irqrestore(&engine->timeline.lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __i915_sw_fence_call
|
static int __i915_sw_fence_call
|
||||||
@ -764,7 +762,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
|
|||||||
rq->ctx = ctx;
|
rq->ctx = ctx;
|
||||||
rq->ring = ring;
|
rq->ring = ring;
|
||||||
rq->timeline = ring->timeline;
|
rq->timeline = ring->timeline;
|
||||||
GEM_BUG_ON(rq->timeline == engine->timeline);
|
GEM_BUG_ON(rq->timeline == &engine->timeline);
|
||||||
|
|
||||||
spin_lock_init(&rq->lock);
|
spin_lock_init(&rq->lock);
|
||||||
dma_fence_init(&rq->fence,
|
dma_fence_init(&rq->fence,
|
||||||
@ -929,7 +927,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
|
|||||||
|
|
||||||
/* Squash repeated waits to the same timelines */
|
/* Squash repeated waits to the same timelines */
|
||||||
if (fence->context != rq->i915->mm.unordered_timeline &&
|
if (fence->context != rq->i915->mm.unordered_timeline &&
|
||||||
intel_timeline_sync_is_later(rq->timeline, fence))
|
i915_timeline_sync_is_later(rq->timeline, fence))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (dma_fence_is_i915(fence))
|
if (dma_fence_is_i915(fence))
|
||||||
@ -943,7 +941,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
|
|||||||
|
|
||||||
/* Record the latest fence used against each timeline */
|
/* Record the latest fence used against each timeline */
|
||||||
if (fence->context != rq->i915->mm.unordered_timeline)
|
if (fence->context != rq->i915->mm.unordered_timeline)
|
||||||
intel_timeline_sync_set(rq->timeline, fence);
|
i915_timeline_sync_set(rq->timeline, fence);
|
||||||
} while (--nchild);
|
} while (--nchild);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@ -1020,7 +1018,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
|
|||||||
{
|
{
|
||||||
struct intel_engine_cs *engine = request->engine;
|
struct intel_engine_cs *engine = request->engine;
|
||||||
struct intel_ring *ring = request->ring;
|
struct intel_ring *ring = request->ring;
|
||||||
struct intel_timeline *timeline = request->timeline;
|
struct i915_timeline *timeline = request->timeline;
|
||||||
struct i915_request *prev;
|
struct i915_request *prev;
|
||||||
u32 *cs;
|
u32 *cs;
|
||||||
int err;
|
int err;
|
||||||
|
@ -37,6 +37,7 @@
|
|||||||
struct drm_file;
|
struct drm_file;
|
||||||
struct drm_i915_gem_object;
|
struct drm_i915_gem_object;
|
||||||
struct i915_request;
|
struct i915_request;
|
||||||
|
struct i915_timeline;
|
||||||
|
|
||||||
struct intel_wait {
|
struct intel_wait {
|
||||||
struct rb_node node;
|
struct rb_node node;
|
||||||
@ -95,7 +96,7 @@ struct i915_request {
|
|||||||
struct i915_gem_context *ctx;
|
struct i915_gem_context *ctx;
|
||||||
struct intel_engine_cs *engine;
|
struct intel_engine_cs *engine;
|
||||||
struct intel_ring *ring;
|
struct intel_ring *ring;
|
||||||
struct intel_timeline *timeline;
|
struct i915_timeline *timeline;
|
||||||
struct intel_signal_node signaling;
|
struct intel_signal_node signaling;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
105
drivers/gpu/drm/i915/i915_timeline.c
Normal file
105
drivers/gpu/drm/i915/i915_timeline.c
Normal file
@ -0,0 +1,105 @@
|
|||||||
|
/*
|
||||||
|
* SPDX-License-Identifier: MIT
|
||||||
|
*
|
||||||
|
* Copyright © 2016-2018 Intel Corporation
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "i915_drv.h"
|
||||||
|
|
||||||
|
#include "i915_timeline.h"
|
||||||
|
#include "i915_syncmap.h"
|
||||||
|
|
||||||
|
void i915_timeline_init(struct drm_i915_private *i915,
|
||||||
|
struct i915_timeline *timeline,
|
||||||
|
const char *name)
|
||||||
|
{
|
||||||
|
lockdep_assert_held(&i915->drm.struct_mutex);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Ideally we want a set of engines on a single leaf as we expect
|
||||||
|
* to mostly be tracking synchronisation between engines. It is not
|
||||||
|
* a huge issue if this is not the case, but we may want to mitigate
|
||||||
|
* any page crossing penalties if they become an issue.
|
||||||
|
*/
|
||||||
|
BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
|
||||||
|
|
||||||
|
timeline->name = name;
|
||||||
|
|
||||||
|
list_add(&timeline->link, &i915->gt.timelines);
|
||||||
|
|
||||||
|
/* Called during early_init before we know how many engines there are */
|
||||||
|
|
||||||
|
timeline->fence_context = dma_fence_context_alloc(1);
|
||||||
|
|
||||||
|
spin_lock_init(&timeline->lock);
|
||||||
|
|
||||||
|
init_request_active(&timeline->last_request, NULL);
|
||||||
|
INIT_LIST_HEAD(&timeline->requests);
|
||||||
|
|
||||||
|
i915_syncmap_init(&timeline->sync);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* i915_timelines_park - called when the driver idles
|
||||||
|
* @i915: the drm_i915_private device
|
||||||
|
*
|
||||||
|
* When the driver is completely idle, we know that all of our sync points
|
||||||
|
* have been signaled and our tracking is then entirely redundant. Any request
|
||||||
|
* to wait upon an older sync point will be completed instantly as we know
|
||||||
|
* the fence is signaled and therefore we will not even look them up in the
|
||||||
|
* sync point map.
|
||||||
|
*/
|
||||||
|
void i915_timelines_park(struct drm_i915_private *i915)
|
||||||
|
{
|
||||||
|
struct i915_timeline *timeline;
|
||||||
|
|
||||||
|
lockdep_assert_held(&i915->drm.struct_mutex);
|
||||||
|
|
||||||
|
list_for_each_entry(timeline, &i915->gt.timelines, link) {
|
||||||
|
/*
|
||||||
|
* All known fences are completed so we can scrap
|
||||||
|
* the current sync point tracking and start afresh,
|
||||||
|
* any attempt to wait upon a previous sync point
|
||||||
|
* will be skipped as the fence was signaled.
|
||||||
|
*/
|
||||||
|
i915_syncmap_free(&timeline->sync);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void i915_timeline_fini(struct i915_timeline *timeline)
|
||||||
|
{
|
||||||
|
GEM_BUG_ON(!list_empty(&timeline->requests));
|
||||||
|
|
||||||
|
i915_syncmap_free(&timeline->sync);
|
||||||
|
|
||||||
|
list_del(&timeline->link);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct i915_timeline *
|
||||||
|
i915_timeline_create(struct drm_i915_private *i915, const char *name)
|
||||||
|
{
|
||||||
|
struct i915_timeline *timeline;
|
||||||
|
|
||||||
|
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
|
||||||
|
if (!timeline)
|
||||||
|
return ERR_PTR(-ENOMEM);
|
||||||
|
|
||||||
|
i915_timeline_init(i915, timeline, name);
|
||||||
|
kref_init(&timeline->kref);
|
||||||
|
|
||||||
|
return timeline;
|
||||||
|
}
|
||||||
|
|
||||||
|
void __i915_timeline_free(struct kref *kref)
|
||||||
|
{
|
||||||
|
struct i915_timeline *timeline =
|
||||||
|
container_of(kref, typeof(*timeline), kref);
|
||||||
|
|
||||||
|
i915_timeline_fini(timeline);
|
||||||
|
kfree(timeline);
|
||||||
|
}
|
||||||
|
|
||||||
|
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
||||||
|
#include "selftests/mock_timeline.c"
|
||||||
|
#include "selftests/i915_timeline.c"
|
||||||
|
#endif
|
@ -22,18 +22,17 @@
|
|||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifndef I915_GEM_TIMELINE_H
|
#ifndef I915_TIMELINE_H
|
||||||
#define I915_GEM_TIMELINE_H
|
#define I915_TIMELINE_H
|
||||||
|
|
||||||
#include <linux/list.h>
|
#include <linux/list.h>
|
||||||
|
#include <linux/kref.h>
|
||||||
|
|
||||||
#include "i915_request.h"
|
#include "i915_request.h"
|
||||||
#include "i915_syncmap.h"
|
#include "i915_syncmap.h"
|
||||||
#include "i915_utils.h"
|
#include "i915_utils.h"
|
||||||
|
|
||||||
struct i915_gem_timeline;
|
struct i915_timeline {
|
||||||
|
|
||||||
struct intel_timeline {
|
|
||||||
u64 fence_context;
|
u64 fence_context;
|
||||||
u32 seqno;
|
u32 seqno;
|
||||||
|
|
||||||
@ -71,51 +70,57 @@ struct intel_timeline {
|
|||||||
*/
|
*/
|
||||||
u32 global_sync[I915_NUM_ENGINES];
|
u32 global_sync[I915_NUM_ENGINES];
|
||||||
|
|
||||||
struct i915_gem_timeline *common;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct i915_gem_timeline {
|
|
||||||
struct list_head link;
|
struct list_head link;
|
||||||
|
|
||||||
struct drm_i915_private *i915;
|
|
||||||
const char *name;
|
const char *name;
|
||||||
|
|
||||||
struct intel_timeline engine[I915_NUM_ENGINES];
|
struct kref kref;
|
||||||
};
|
};
|
||||||
|
|
||||||
int i915_gem_timeline_init(struct drm_i915_private *i915,
|
void i915_timeline_init(struct drm_i915_private *i915,
|
||||||
struct i915_gem_timeline *tl,
|
struct i915_timeline *tl,
|
||||||
const char *name);
|
const char *name);
|
||||||
int i915_gem_timeline_init__global(struct drm_i915_private *i915);
|
void i915_timeline_fini(struct i915_timeline *tl);
|
||||||
void i915_gem_timelines_park(struct drm_i915_private *i915);
|
|
||||||
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
|
|
||||||
|
|
||||||
struct i915_gem_timeline *
|
struct i915_timeline *
|
||||||
i915_gem_timeline_create(struct drm_i915_private *i915, const char *name);
|
i915_timeline_create(struct drm_i915_private *i915, const char *name);
|
||||||
void i915_gem_timeline_free(struct i915_gem_timeline *timeline);
|
|
||||||
|
|
||||||
static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
|
static inline struct i915_timeline *
|
||||||
u64 context, u32 seqno)
|
i915_timeline_get(struct i915_timeline *timeline)
|
||||||
|
{
|
||||||
|
kref_get(&timeline->kref);
|
||||||
|
return timeline;
|
||||||
|
}
|
||||||
|
|
||||||
|
void __i915_timeline_free(struct kref *kref);
|
||||||
|
static inline void i915_timeline_put(struct i915_timeline *timeline)
|
||||||
|
{
|
||||||
|
kref_put(&timeline->kref, __i915_timeline_free);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int __i915_timeline_sync_set(struct i915_timeline *tl,
|
||||||
|
u64 context, u32 seqno)
|
||||||
{
|
{
|
||||||
return i915_syncmap_set(&tl->sync, context, seqno);
|
return i915_syncmap_set(&tl->sync, context, seqno);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int intel_timeline_sync_set(struct intel_timeline *tl,
|
static inline int i915_timeline_sync_set(struct i915_timeline *tl,
|
||||||
const struct dma_fence *fence)
|
const struct dma_fence *fence)
|
||||||
{
|
{
|
||||||
return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
|
return __i915_timeline_sync_set(tl, fence->context, fence->seqno);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
|
static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl,
|
||||||
u64 context, u32 seqno)
|
u64 context, u32 seqno)
|
||||||
{
|
{
|
||||||
return i915_syncmap_is_later(&tl->sync, context, seqno);
|
return i915_syncmap_is_later(&tl->sync, context, seqno);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
|
static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
|
||||||
const struct dma_fence *fence)
|
const struct dma_fence *fence)
|
||||||
{
|
{
|
||||||
return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
|
return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
+void i915_timelines_park(struct drm_i915_private *i915);
 
 #endif
@@ -451,12 +451,6 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
 }
 
-static void intel_engine_init_timeline(struct intel_engine_cs *engine)
-{
-	engine->timeline =
-		&engine->i915->gt.execution_timeline.engine[engine->id];
-}
-
 static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
 {
 	i915_gem_batch_pool_init(&engine->batch_pool, engine);
@@ -508,8 +502,9 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
  */
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
+	i915_timeline_init(engine->i915, &engine->timeline, engine->name);
+
 	intel_engine_init_execlist(engine);
-	intel_engine_init_timeline(engine);
 	intel_engine_init_hangcheck(engine);
 	intel_engine_init_batch_pool(engine);
 	intel_engine_init_cmd_parser(engine);
@@ -751,6 +746,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	if (engine->i915->preempt_context)
 		intel_context_unpin(engine->i915->preempt_context, engine);
 	intel_context_unpin(engine->i915->kernel_context, engine);
+
+	i915_timeline_fini(&engine->timeline);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
@@ -1003,7 +1000,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 	 * the last request that remains in the timeline. When idle, it is
 	 * the last executed context as tracked by retirement.
 	 */
-	rq = __i915_gem_active_peek(&engine->timeline->last_request);
+	rq = __i915_gem_active_peek(&engine->timeline.last_request);
 	if (rq)
 		return rq->ctx == kernel_context;
 	else
@@ -1335,14 +1332,14 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 	drm_printf(m, "\tRequests:\n");
 
-	rq = list_first_entry(&engine->timeline->requests,
+	rq = list_first_entry(&engine->timeline.requests,
 			      struct i915_request, link);
-	if (&rq->link != &engine->timeline->requests)
+	if (&rq->link != &engine->timeline.requests)
 		print_request(m, rq, "\t\tfirst ");
 
-	rq = list_last_entry(&engine->timeline->requests,
+	rq = list_last_entry(&engine->timeline.requests,
 			      struct i915_request, link);
-	if (&rq->link != &engine->timeline->requests)
+	if (&rq->link != &engine->timeline.requests)
 		print_request(m, rq, "\t\tlast ");
 
 	rq = i915_gem_find_active_request(engine);
@@ -1374,11 +1371,11 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
 	}
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 
 	last = NULL;
 	count = 0;
-	list_for_each_entry(rq, &engine->timeline->requests, link) {
+	list_for_each_entry(rq, &engine->timeline.requests, link) {
 		if (count++ < MAX_REQUESTS_TO_SHOW - 1)
 			print_request(m, rq, "\t\tE ");
 		else
@@ -1416,7 +1413,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		print_request(m, last, "\t\tQ ");
 	}
 
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 
 	spin_lock_irq(&b->rb_lock);
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
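The hunks above capture the core of the change: the engine no longer points into a device-global array of per-engine timelines but embeds its own i915_timeline, initialised in intel_engine_setup_common() and torn down in intel_engine_cleanup_common(), with every `engine->timeline->` dereference flattened to `engine->timeline.`. A minimal standalone sketch of that ownership pattern (the types and helpers below are simplified stand-ins for illustration, not the driver's real structs):

#include <stdio.h>

/* Simplified stand-ins for i915_timeline / intel_engine_cs. */
struct timeline {
	const char *name;
	unsigned int seqno;
};

struct engine {
	const char *name;
	struct timeline timeline;	/* embedded: the engine owns its timeline */
};

static void timeline_init(struct timeline *tl, const char *name)
{
	tl->name = name;
	tl->seqno = 0;
}

static void timeline_fini(struct timeline *tl)
{
	/* nothing to free: the storage lives inside the engine */
	(void)tl;
}

static void engine_setup_common(struct engine *engine)
{
	/* was: engine->timeline = &global_timelines[engine_id]; */
	timeline_init(&engine->timeline, engine->name);
}

static void engine_cleanup_common(struct engine *engine)
{
	timeline_fini(&engine->timeline);
}

int main(void)
{
	struct engine rcs = { .name = "rcs0" };

	engine_setup_common(&rcs);
	printf("%s: seqno=%u\n", rcs.timeline.name, rcs.timeline.seqno);
	engine_cleanup_common(&rcs);
	return 0;
}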
@@ -679,7 +679,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 	bool submit = false;
 	struct rb_node *rb;
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	rb = execlists->first;
 	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 
@@ -750,7 +750,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
 
 unlock:
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 }
 
 static void guc_submission_tasklet(unsigned long data)
@@ -331,10 +331,10 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	struct i915_priolist *uninitialized_var(p);
 	int last_prio = I915_PRIORITY_INVALID;
 
-	lockdep_assert_held(&engine->timeline->lock);
+	lockdep_assert_held(&engine->timeline.lock);
 
 	list_for_each_entry_safe_reverse(rq, rn,
-					 &engine->timeline->requests,
+					 &engine->timeline.requests,
 					 link) {
 		if (i915_request_completed(rq))
 			return;
@@ -358,9 +358,9 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
 	struct intel_engine_cs *engine =
 		container_of(execlists, typeof(*engine), execlists);
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	__unwind_incomplete_requests(engine);
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 }
 
 static inline void
@@ -584,7 +584,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */
 
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 	rb = execlists->first;
 	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 
@@ -744,7 +744,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
 
 unlock:
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 
 	if (submit) {
 		execlists_user_begin(execlists, execlists->port);
@@ -894,10 +894,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	execlists_cancel_port_requests(execlists);
 	reset_irq(engine);
 
-	spin_lock(&engine->timeline->lock);
+	spin_lock(&engine->timeline.lock);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->timeline->requests, link) {
+	list_for_each_entry(rq, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!rq->global_seqno);
 		if (!i915_request_completed(rq))
 			dma_fence_set_error(&rq->fence, -EIO);
@@ -929,7 +929,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	execlists->first = NULL;
 	GEM_BUG_ON(port_isset(execlists->port));
 
-	spin_unlock(&engine->timeline->lock);
+	spin_unlock(&engine->timeline.lock);
 
 	local_irq_restore(flags);
 }
@@ -1167,7 +1167,7 @@ static void execlists_submit_request(struct i915_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	queue_request(engine, &request->sched, rq_prio(request));
 	submit_queue(engine, rq_prio(request));
@@ -1175,7 +1175,7 @@ static void execlists_submit_request(struct i915_request *request)
 	GEM_BUG_ON(!engine->execlists.first);
 	GEM_BUG_ON(list_empty(&request->sched.link));
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static struct i915_request *sched_to_request(struct i915_sched_node *node)
@@ -1191,8 +1191,8 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 	GEM_BUG_ON(!locked);
 
 	if (engine != locked) {
-		spin_unlock(&locked->timeline->lock);
-		spin_lock(&engine->timeline->lock);
+		spin_unlock(&locked->timeline.lock);
+		spin_lock(&engine->timeline.lock);
 	}
 
 	return engine;
@@ -1275,7 +1275,7 @@ static void execlists_schedule(struct i915_request *request,
 	}
 
 	engine = request->engine;
-	spin_lock_irq(&engine->timeline->lock);
+	spin_lock_irq(&engine->timeline.lock);
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
@@ -1299,7 +1299,7 @@ static void execlists_schedule(struct i915_request *request,
 		__submit_queue(engine, prio);
 	}
 
-	spin_unlock_irq(&engine->timeline->lock);
+	spin_unlock_irq(&engine->timeline.lock);
 }
 
 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
@@ -1828,9 +1828,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	reset_irq(engine);
 
 	/* Push back any incomplete requests for replay after the reset. */
-	spin_lock(&engine->timeline->lock);
+	spin_lock(&engine->timeline.lock);
 	__unwind_incomplete_requests(engine);
-	spin_unlock(&engine->timeline->lock);
+	spin_unlock(&engine->timeline.lock);
 
 	local_irq_restore(flags);
 
@@ -2599,6 +2599,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	struct i915_vma *vma;
 	uint32_t context_size;
 	struct intel_ring *ring;
+	struct i915_timeline *timeline;
 	int ret;
 
 	if (ce->state)
@@ -2614,8 +2615,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
 	if (IS_ERR(ctx_obj)) {
-		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
-		return PTR_ERR(ctx_obj);
+		ret = PTR_ERR(ctx_obj);
+		goto error_deref_obj;
 	}
 
 	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
@@ -2624,7 +2625,14 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		goto error_deref_obj;
 	}
 
-	ring = intel_engine_create_ring(engine, ctx->timeline, ctx->ring_size);
+	timeline = i915_timeline_create(ctx->i915, ctx->name);
+	if (IS_ERR(timeline)) {
+		ret = PTR_ERR(timeline);
+		goto error_deref_obj;
+	}
+
+	ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+	i915_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		goto error_deref_obj;
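The execlists_context_deferred_alloc() hunk above establishes the new lifetime rule for per-context timelines: create one with i915_timeline_create(), hand it to intel_engine_create_ring() — which takes its own reference via i915_timeline_get() — and immediately drop the creation reference with i915_timeline_put(). A rough standalone sketch of that reference handoff, using a bare counter where the kernel uses kref (simplified stand-in types, not the driver's API):

#include <stdlib.h>

/* Simplified stand-in for the refcounted i915_timeline; the kernel
 * uses kref, this sketch uses a bare counter and is not thread-safe. */
struct timeline {
	int refcount;
};

static struct timeline *timeline_create(void)
{
	struct timeline *tl = malloc(sizeof(*tl));

	if (tl)
		tl->refcount = 1;	/* the creation reference */
	return tl;
}

static struct timeline *timeline_get(struct timeline *tl)
{
	tl->refcount++;
	return tl;
}

static void timeline_put(struct timeline *tl)
{
	if (--tl->refcount == 0)
		free(tl);
}

/* The ring keeps the timeline alive by holding its own reference. */
struct ring {
	struct timeline *timeline;
};

static struct ring *ring_create(struct timeline *tl)
{
	struct ring *ring = malloc(sizeof(*ring));

	if (ring)
		ring->timeline = timeline_get(tl);
	return ring;
}

static void ring_free(struct ring *ring)
{
	timeline_put(ring->timeline);
	free(ring);
}

int main(void)
{
	struct timeline *tl = timeline_create();
	struct ring *ring;

	if (!tl)
		return 1;

	ring = ring_create(tl);
	timeline_put(tl);		/* drop the creation ref; the ring's ref remains */
	if (ring)
		ring_free(ring);	/* last reference gone, timeline freed */
	return 0;
}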
@@ -697,17 +697,17 @@ static void cancel_requests(struct intel_engine_cs *engine)
 	struct i915_request *request;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->timeline->requests, link) {
+	list_for_each_entry(request, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!request->global_seqno);
 		if (!i915_request_completed(request))
 			dma_fence_set_error(&request->fence, -EIO);
 	}
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static void i9xx_submit_request(struct i915_request *request)
@@ -1118,7 +1118,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine,
-			 struct i915_gem_timeline *timeline,
+			 struct i915_timeline *timeline,
 			 int size)
 {
 	struct intel_ring *ring;
@@ -1126,7 +1126,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 
 	GEM_BUG_ON(!is_power_of_2(size));
 	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
-	GEM_BUG_ON(&timeline->engine[engine->id] == engine->timeline);
+	GEM_BUG_ON(timeline == &engine->timeline);
 	lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1134,7 +1134,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->timeline = &timeline->engine[engine->id];
+	ring->timeline = i915_timeline_get(timeline);
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -1165,6 +1165,7 @@ intel_ring_free(struct intel_ring *ring)
 	i915_vma_close(ring->vma);
 	__i915_gem_object_release_unless_active(obj);
 
+	i915_timeline_put(ring->timeline);
 	kfree(ring);
 }
 
@@ -1323,6 +1324,7 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct intel_ring *ring;
+	struct i915_timeline *timeline;
 	int err;
 
 	intel_engine_setup_common(engine);
@@ -1331,9 +1333,14 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	if (err)
 		goto err;
 
-	ring = intel_engine_create_ring(engine,
-					&engine->i915->gt.legacy_timeline,
-					32 * PAGE_SIZE);
+	timeline = i915_timeline_create(engine->i915, engine->name);
+	if (IS_ERR(timeline)) {
+		err = PTR_ERR(timeline);
+		goto err;
+	}
+
+	ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
+	i915_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
 		goto err;
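Worth noting in intel_engine_create_ring(): with the timeline now a singular object, the old per-engine array lookup disappears and the sanity check reduces to an identity comparison — a ring may be bound to any timeline except the engine's own execution timeline. A toy illustration of that invariant, with assert() standing in for GEM_BUG_ON() and hypothetical names throughout:

#include <assert.h>
#include <stdio.h>

struct timeline {
	unsigned int seqno;
};

struct engine {
	struct timeline timeline;	/* the engine's own execution timeline */
};

struct ring {
	struct timeline *timeline;
};

static void ring_bind_timeline(struct ring *ring, struct engine *engine,
			       struct timeline *timeline)
{
	/* a ring must never alias the engine's execution timeline */
	assert(timeline != &engine->timeline);
	ring->timeline = timeline;
}

int main(void)
{
	struct engine engine = { { 0 } };
	struct timeline ctx_timeline = { 0 };
	struct ring ring;

	ring_bind_timeline(&ring, &engine, &ctx_timeline);	/* fine */
	/* ring_bind_timeline(&ring, &engine, &engine.timeline); would assert */
	printf("ring timeline %p, engine timeline %p\n",
	       (void *)ring.timeline, (void *)&engine.timeline);
	return 0;
}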
@@ -6,12 +6,12 @@
 #include <linux/seqlock.h>
 
 #include "i915_gem_batch_pool.h"
-#include "i915_gem_timeline.h"
 
 #include "i915_reg.h"
 #include "i915_pmu.h"
 #include "i915_request.h"
 #include "i915_selftest.h"
+#include "i915_timeline.h"
 #include "intel_gpu_commands.h"
 
 struct drm_printer;
@@ -129,7 +129,7 @@ struct intel_ring {
 	struct i915_vma *vma;
 	void *vaddr;
 
-	struct intel_timeline *timeline;
+	struct i915_timeline *timeline;
 	struct list_head request_list;
 	struct list_head active_link;
 
@@ -338,7 +338,8 @@ struct intel_engine_cs {
 	u32 mmio_base;
 
 	struct intel_ring *buffer;
-	struct intel_timeline *timeline;
+
+	struct i915_timeline timeline;
 
 	struct drm_i915_gem_object *default_state;
 
@@ -770,7 +771,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine,
-			 struct i915_gem_timeline *timeline,
+			 struct i915_timeline *timeline,
			 int size);
 int intel_ring_pin(struct intel_ring *ring,
 		   struct drm_i915_private *i915,
@@ -889,7 +890,7 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
 	 * wtih serialising this hint with anything, so document it as
 	 * a hint and nothing more.
 	 */
-	return READ_ONCE(engine->timeline->seqno);
+	return READ_ONCE(engine->timeline.seqno);
 }
 
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
@@ -355,18 +355,6 @@ static int igt_ctx_exec(void *arg)
 
 		if (first_shared_gtt) {
 			ctx = __create_hw_context(i915, file->driver_priv);
-			if (!IS_ERR(ctx) && HAS_EXECLISTS(i915)) {
-				struct i915_gem_timeline *timeline;
-
-				timeline = i915_gem_timeline_create(i915, ctx->name);
-				if (IS_ERR(timeline)) {
-					__destroy_hw_context(ctx, file->driver_priv);
-					ctx = ERR_CAST(timeline);
-				} else {
-					ctx->timeline = timeline;
-				}
-			}
-
 			first_shared_gtt = false;
 		} else {
 			ctx = i915_gem_create_context(i915, file->driver_priv);
@@ -1,25 +1,7 @@
 /*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
  *
+ * Copyright © 2017-2018 Intel Corporation
  */
 
 #include "../i915_selftest.h"
@@ -35,21 +17,21 @@ struct __igt_sync {
 	bool set;
 };
 
-static int __igt_sync(struct intel_timeline *tl,
+static int __igt_sync(struct i915_timeline *tl,
 		      u64 ctx,
 		      const struct __igt_sync *p,
 		      const char *name)
 {
 	int ret;
 
-	if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+	if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
 		pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
 		       name, p->name, ctx, p->seqno, yesno(p->expected));
 		return -EINVAL;
 	}
 
 	if (p->set) {
-		ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
+		ret = __i915_timeline_sync_set(tl, ctx, p->seqno);
 		if (ret)
 			return ret;
 	}
@@ -77,37 +59,31 @@ static int igt_sync(void *arg)
 		{ "unwrap", UINT_MAX, true, false },
 		{},
 	}, *p;
-	struct intel_timeline *tl;
+	struct i915_timeline tl;
 	int order, offset;
 	int ret = -ENODEV;
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	mock_timeline_init(&tl, 0);
 
 	for (p = pass; p->name; p++) {
 		for (order = 1; order < 64; order++) {
 			for (offset = -1; offset <= (order > 1); offset++) {
 				u64 ctx = BIT_ULL(order) + offset;
 
-				ret = __igt_sync(tl, ctx, p, "1");
+				ret = __igt_sync(&tl, ctx, p, "1");
 				if (ret)
 					goto out;
 			}
 		}
 	}
-	mock_timeline_destroy(tl);
+	mock_timeline_fini(&tl);
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
-
+	mock_timeline_init(&tl, 0);
 	for (order = 1; order < 64; order++) {
 		for (offset = -1; offset <= (order > 1); offset++) {
 			u64 ctx = BIT_ULL(order) + offset;
 
 			for (p = pass; p->name; p++) {
-				ret = __igt_sync(tl, ctx, p, "2");
+				ret = __igt_sync(&tl, ctx, p, "2");
 				if (ret)
 					goto out;
 			}
@@ -115,7 +91,7 @@ static int igt_sync(void *arg)
 	}
 
 out:
-	mock_timeline_destroy(tl);
+	mock_timeline_fini(&tl);
 	return ret;
 }
 
@@ -127,15 +103,13 @@ static unsigned int random_engine(struct rnd_state *rnd)
 static int bench_sync(void *arg)
 {
 	struct rnd_state prng;
-	struct intel_timeline *tl;
+	struct i915_timeline tl;
 	unsigned long end_time, count;
 	u64 prng32_1M;
 	ktime_t kt;
 	int order, last_order;
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	mock_timeline_init(&tl, 0);
 
 	/* Lookups from cache are very fast and so the random number generation
 	 * and the loop itself becomes a significant factor in the per-iteration
@@ -167,7 +141,7 @@ static int bench_sync(void *arg)
 	do {
 		u64 id = i915_prandom_u64_state(&prng);
 
-		__intel_timeline_sync_set(tl, id, 0);
+		__i915_timeline_sync_set(&tl, id, 0);
 		count++;
 	} while (!time_after(jiffies, end_time));
 	kt = ktime_sub(ktime_get(), kt);
@@ -182,8 +156,8 @@ static int bench_sync(void *arg)
 	while (end_time--) {
 		u64 id = i915_prandom_u64_state(&prng);
 
-		if (!__intel_timeline_sync_is_later(tl, id, 0)) {
-			mock_timeline_destroy(tl);
+		if (!__i915_timeline_sync_is_later(&tl, id, 0)) {
+			mock_timeline_fini(&tl);
 			pr_err("Lookup of %llu failed\n", id);
 			return -EINVAL;
 		}
@@ -193,19 +167,17 @@ static int bench_sync(void *arg)
 	pr_info("%s: %lu random lookups, %lluns/lookup\n",
 		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
 
-	mock_timeline_destroy(tl);
+	mock_timeline_fini(&tl);
 	cond_resched();
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	mock_timeline_init(&tl, 0);
 
 	/* Benchmark setting the first N (in order) contexts */
 	count = 0;
 	kt = ktime_get();
 	end_time = jiffies + HZ/10;
 	do {
-		__intel_timeline_sync_set(tl, count++, 0);
+		__i915_timeline_sync_set(&tl, count++, 0);
 	} while (!time_after(jiffies, end_time));
 	kt = ktime_sub(ktime_get(), kt);
 	pr_info("%s: %lu in-order insertions, %lluns/insert\n",
@@ -215,9 +187,9 @@ static int bench_sync(void *arg)
 	end_time = count;
 	kt = ktime_get();
 	while (end_time--) {
-		if (!__intel_timeline_sync_is_later(tl, end_time, 0)) {
+		if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) {
 			pr_err("Lookup of %lu failed\n", end_time);
-			mock_timeline_destroy(tl);
+			mock_timeline_fini(&tl);
 			return -EINVAL;
 		}
 	}
@@ -225,12 +197,10 @@ static int bench_sync(void *arg)
 	pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
 		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
 
-	mock_timeline_destroy(tl);
+	mock_timeline_fini(&tl);
 	cond_resched();
 
-	tl = mock_timeline(0);
-	if (!tl)
-		return -ENOMEM;
+	mock_timeline_init(&tl, 0);
 
 	/* Benchmark searching for a random context id and maybe changing it */
 	prandom_seed_state(&prng, i915_selftest.random_seed);
@@ -241,8 +211,8 @@ static int bench_sync(void *arg)
 		u32 id = random_engine(&prng);
 		u32 seqno = prandom_u32_state(&prng);
 
-		if (!__intel_timeline_sync_is_later(tl, id, seqno))
-			__intel_timeline_sync_set(tl, id, seqno);
+		if (!__i915_timeline_sync_is_later(&tl, id, seqno))
+			__i915_timeline_sync_set(&tl, id, seqno);
 
 		count++;
 	} while (!time_after(jiffies, end_time));
@@ -250,7 +220,7 @@ static int bench_sync(void *arg)
 	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
 	pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
 		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-	mock_timeline_destroy(tl);
+	mock_timeline_fini(&tl);
 	cond_resched();
 
 	/* Benchmark searching for a known context id and changing the seqno */
@@ -258,9 +228,7 @@ static int bench_sync(void *arg)
 	     ({ int tmp = last_order; last_order = order; order += tmp; })) {
 		unsigned int mask = BIT(order) - 1;
 
-		tl = mock_timeline(0);
-		if (!tl)
-			return -ENOMEM;
+		mock_timeline_init(&tl, 0);
 
 		count = 0;
 		kt = ktime_get();
@@ -272,8 +240,8 @@ static int bench_sync(void *arg)
 			 */
 			u64 id = (u64)(count & mask) << order;
 
-			__intel_timeline_sync_is_later(tl, id, 0);
-			__intel_timeline_sync_set(tl, id, 0);
+			__i915_timeline_sync_is_later(&tl, id, 0);
+			__i915_timeline_sync_set(&tl, id, 0);
 
 			count++;
 		} while (!time_after(jiffies, end_time));
@@ -281,7 +249,7 @@ static int bench_sync(void *arg)
 		pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
 			__func__, count, order,
 			(long long)div64_ul(ktime_to_ns(kt), count));
-		mock_timeline_destroy(tl);
+		mock_timeline_fini(&tl);
 		cond_resched();
 	}
 
@@ -25,6 +25,11 @@
 #include "mock_engine.h"
 #include "mock_request.h"
 
+struct mock_ring {
+	struct intel_ring base;
+	struct i915_timeline timeline;
+};
+
 static struct mock_request *first_request(struct mock_engine *engine)
 {
 	return list_first_entry_or_null(&engine->hw_queue,
@@ -132,7 +137,7 @@ static void mock_submit_request(struct i915_request *request)
 static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 {
 	const unsigned long sz = PAGE_SIZE / 2;
-	struct intel_ring *ring;
+	struct mock_ring *ring;
 
 	BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
 
@@ -140,20 +145,24 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	if (!ring)
 		return NULL;
 
-	ring->timeline = &engine->i915->gt.legacy_timeline.engine[engine->id];
+	i915_timeline_init(engine->i915, &ring->timeline, engine->name);
 
-	ring->size = sz;
-	ring->effective_size = sz;
-	ring->vaddr = (void *)(ring + 1);
+	ring->base.size = sz;
+	ring->base.effective_size = sz;
+	ring->base.vaddr = (void *)(ring + 1);
+	ring->base.timeline = &ring->timeline;
 
-	INIT_LIST_HEAD(&ring->request_list);
-	intel_ring_update_space(ring);
+	INIT_LIST_HEAD(&ring->base.request_list);
+	intel_ring_update_space(&ring->base);
 
-	return ring;
+	return &ring->base;
 }
 
-static void mock_ring_free(struct intel_ring *ring)
+static void mock_ring_free(struct intel_ring *base)
 {
+	struct mock_ring *ring = container_of(base, typeof(*ring), base);
+
+	i915_timeline_fini(&ring->timeline);
 	kfree(ring);
 }
 
@@ -182,8 +191,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.emit_breadcrumb = mock_emit_breadcrumb;
 	engine->base.submit_request = mock_submit_request;
 
-	intel_engine_init_timeline(&engine->base);
-
+	i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
 	intel_engine_init_breadcrumbs(&engine->base);
 	engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
 
@@ -200,6 +208,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 
 err_breadcrumbs:
 	intel_engine_fini_breadcrumbs(&engine->base);
+	i915_timeline_fini(&engine->base.timeline);
 	kfree(engine);
 	return NULL;
 }
@@ -238,6 +247,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
 	mock_ring_free(engine->buffer);
 
 	intel_engine_fini_breadcrumbs(engine);
+	i915_timeline_fini(&engine->timeline);
 
 	kfree(engine);
 }
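Since the mock ring now needs storage for a private i915_timeline, the selftest wraps intel_ring in a mock_ring struct and uses container_of() in mock_ring_free() to recover the wrapper from the base pointer it handed out. The same idiom in self-contained form (generic names, not the driver's types):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_ring {
	size_t size;
};

struct mock_ring {
	struct base_ring base;	/* the only part callers ever see */
	int timeline;		/* extra per-mock state, stand-in for i915_timeline */
};

static struct base_ring *mock_ring_create(void)
{
	struct mock_ring *ring = calloc(1, sizeof(*ring));

	if (!ring)
		return NULL;
	ring->base.size = 4096;
	ring->timeline = 42;
	return &ring->base;	/* hand out the embedded base pointer */
}

static void mock_ring_free(struct base_ring *base)
{
	/* recover the wrapper from the embedded base member */
	struct mock_ring *ring = container_of(base, struct mock_ring, base);

	printf("freeing mock ring, timeline=%d\n", ring->timeline);
	free(ring);
}

int main(void)
{
	struct base_ring *ring = mock_ring_create();

	if (ring)
		mock_ring_free(ring);
	return 0;
}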
@@ -73,10 +73,8 @@ static void mock_device_release(struct drm_device *dev)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_fini_ggtt(i915);
-	i915_gem_timeline_fini(&i915->gt.legacy_timeline);
-	i915_gem_timeline_fini(&i915->gt.execution_timeline);
-	WARN_ON(!list_empty(&i915->gt.timelines));
 	mutex_unlock(&i915->drm.struct_mutex);
+	WARN_ON(!list_empty(&i915->gt.timelines));
 
 	destroy_workqueue(i915->wq);
 
@@ -230,12 +228,6 @@ struct drm_i915_private *mock_gem_device(void)
 	INIT_LIST_HEAD(&i915->gt.active_rings);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	err = i915_gem_timeline_init__global(i915);
-	if (err) {
-		mutex_unlock(&i915->drm.struct_mutex);
-		goto err_priorities;
-	}
-
 	mock_init_ggtt(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
@@ -1,45 +1,28 @@
 /*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
  *
+ * Copyright © 2017-2018 Intel Corporation
  */
 
+#include "../i915_timeline.h"
+
 #include "mock_timeline.h"
 
-struct intel_timeline *mock_timeline(u64 context)
+void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 {
-	static struct lock_class_key class;
-	struct intel_timeline *tl;
+	timeline->fence_context = context;
 
-	tl = kzalloc(sizeof(*tl), GFP_KERNEL);
-	if (!tl)
-		return NULL;
+	spin_lock_init(&timeline->lock);
 
-	__intel_timeline_init(tl, NULL, context, &class, "mock");
+	init_request_active(&timeline->last_request, NULL);
+	INIT_LIST_HEAD(&timeline->requests);
 
-	return tl;
+	i915_syncmap_init(&timeline->sync);
+
+	INIT_LIST_HEAD(&timeline->link);
 }
 
-void mock_timeline_destroy(struct intel_timeline *tl)
+void mock_timeline_fini(struct i915_timeline *timeline)
 {
-	__intel_timeline_fini(tl);
-	kfree(tl);
+	i915_timeline_fini(timeline);
 }
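The mock_timeline rewrite above also switches the fixture from heap allocation (mock_timeline()/mock_timeline_destroy()) to init-in-place (mock_timeline_init()/mock_timeline_fini()), which is what lets igt_sync() and bench_sync() keep the timeline on the stack and drop their -ENOMEM handling. A minimal sketch of that API shape, with stand-in types rather than the real i915_timeline:

#include <stdio.h>

struct timeline {
	unsigned long long fence_context;
	unsigned int seqno;
};

/* init-in-place: the caller provides the storage, so this cannot fail */
static void mock_timeline_init(struct timeline *tl,
			       unsigned long long context)
{
	tl->fence_context = context;
	tl->seqno = 0;
}

static void mock_timeline_fini(struct timeline *tl)
{
	/* release internal state only; the struct itself is the caller's */
	(void)tl;
}

int main(void)
{
	struct timeline tl;	/* stack storage, no allocation to check */

	mock_timeline_init(&tl, 0);
	printf("context=%llu seqno=%u\n", tl.fence_context, tl.seqno);
	mock_timeline_fini(&tl);
	return 0;
}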
@@ -1,33 +1,15 @@
 /*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
  *
+ * Copyright © 2017-2018 Intel Corporation
  */
 
 #ifndef __MOCK_TIMELINE__
 #define __MOCK_TIMELINE__
 
-#include "../i915_gem_timeline.h"
+struct i915_timeline;
 
-struct intel_timeline *mock_timeline(u64 context);
-void mock_timeline_destroy(struct intel_timeline *tl);
+void mock_timeline_init(struct i915_timeline *timeline, u64 context);
+void mock_timeline_fini(struct i915_timeline *timeline);
 
 #endif /* !__MOCK_TIMELINE__ */