Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, last synced 2024-12-28 11:18:45 +07:00.
drm/i915: Export intel_context_instance()
We want to pass an intel_context into intel_context_pin(), and that requires us to first be able to look up the intel_context.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190426163336.15906-2-chris@chris-wilson.co.uk
This commit is contained in:
parent
251d46b087
commit
fa9f668141
@ -104,7 +104,7 @@ void __intel_context_remove(struct intel_context *ce)
|
||||
spin_unlock(&ctx->hw_contexts_lock);
|
||||
}
|
||||
|
||||
static struct intel_context *
|
||||
struct intel_context *
|
||||
intel_context_instance(struct i915_gem_context *ctx,
|
||||
struct intel_engine_cs *engine)
|
||||
{
|
||||
@ -112,7 +112,7 @@ intel_context_instance(struct i915_gem_context *ctx,
|
||||
|
||||
ce = intel_context_lookup(ctx, engine);
|
||||
if (likely(ce))
|
||||
return ce;
|
||||
return intel_context_get(ce);
|
||||
|
||||
ce = intel_context_alloc();
|
||||
if (!ce)
|
||||
@ -125,7 +125,7 @@ intel_context_instance(struct i915_gem_context *ctx,
|
||||
intel_context_free(ce);
|
||||
|
||||
GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
|
||||
return pos;
|
||||
return intel_context_get(pos);
|
||||
}
|
||||
|
||||
struct intel_context *
|
||||
@ -139,30 +139,30 @@ intel_context_pin_lock(struct i915_gem_context *ctx,
|
||||
if (IS_ERR(ce))
|
||||
return ce;
|
||||
|
||||
if (mutex_lock_interruptible(&ce->pin_mutex))
|
||||
if (mutex_lock_interruptible(&ce->pin_mutex)) {
|
||||
intel_context_put(ce);
|
||||
return ERR_PTR(-EINTR);
|
||||
}
|
||||
|
||||
return ce;
|
||||
}
|
||||
|
||||
struct intel_context *
|
||||
intel_context_pin(struct i915_gem_context *ctx,
|
||||
struct intel_engine_cs *engine)
|
||||
void intel_context_pin_unlock(struct intel_context *ce)
|
||||
__releases(ce->pin_mutex)
|
||||
{
|
||||
mutex_unlock(&ce->pin_mutex);
|
||||
intel_context_put(ce);
|
||||
}
|
||||
|
||||
int __intel_context_do_pin(struct intel_context *ce)
|
||||
{
|
||||
struct intel_context *ce;
|
||||
int err;
|
||||
|
||||
ce = intel_context_instance(ctx, engine);
|
||||
if (IS_ERR(ce))
|
||||
return ce;
|
||||
|
||||
if (likely(atomic_inc_not_zero(&ce->pin_count)))
|
||||
return ce;
|
||||
|
||||
if (mutex_lock_interruptible(&ce->pin_mutex))
|
||||
return ERR_PTR(-EINTR);
|
||||
return -EINTR;
|
||||
|
||||
if (likely(!atomic_read(&ce->pin_count))) {
|
||||
struct i915_gem_context *ctx = ce->gem_context;
|
||||
intel_wakeref_t wakeref;
|
||||
|
||||
err = 0;
|
||||
@ -172,7 +172,6 @@ intel_context_pin(struct i915_gem_context *ctx,
|
||||
goto err;
|
||||
|
||||
i915_gem_context_get(ctx);
|
||||
GEM_BUG_ON(ce->gem_context != ctx);
|
||||
|
||||
mutex_lock(&ctx->mutex);
|
||||
list_add(&ce->active_link, &ctx->active_engines);
|
||||
@ -186,11 +185,11 @@ intel_context_pin(struct i915_gem_context *ctx,
|
||||
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
|
||||
|
||||
mutex_unlock(&ce->pin_mutex);
|
||||
return ce;
|
||||
return 0;
|
||||
|
||||
err:
|
||||
mutex_unlock(&ce->pin_mutex);
|
||||
return ERR_PTR(err);
|
||||
return err;
|
||||
}
|
||||
|
||||
void intel_context_unpin(struct intel_context *ce)
|
||||
|
@ -49,11 +49,7 @@ intel_context_is_pinned(struct intel_context *ce)
|
||||
return atomic_read(&ce->pin_count);
|
||||
}
|
||||
|
||||
static inline void intel_context_pin_unlock(struct intel_context *ce)
|
||||
__releases(ce->pin_mutex)
|
||||
{
|
||||
mutex_unlock(&ce->pin_mutex);
|
||||
}
|
||||
void intel_context_pin_unlock(struct intel_context *ce);
|
||||
|
||||
struct intel_context *
|
||||
__intel_context_insert(struct i915_gem_context *ctx,
|
||||
@ -63,7 +59,18 @@ void
|
||||
__intel_context_remove(struct intel_context *ce);
|
||||
|
||||
struct intel_context *
|
||||
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
|
||||
intel_context_instance(struct i915_gem_context *ctx,
|
||||
struct intel_engine_cs *engine);
|
||||
|
||||
int __intel_context_do_pin(struct intel_context *ce);
|
||||
|
||||
static inline int intel_context_pin(struct intel_context *ce)
|
||||
{
|
||||
if (likely(atomic_inc_not_zero(&ce->pin_count)))
|
||||
return 0;
|
||||
|
||||
return __intel_context_do_pin(ce);
|
||||
}
|
||||
|
||||
static inline void __intel_context_pin(struct intel_context *ce)
|
||||
{
|
||||
|
@ -713,11 +713,17 @@ static int pin_context(struct i915_gem_context *ctx,
|
||||
struct intel_context **out)
|
||||
{
|
||||
struct intel_context *ce;
|
||||
int err;
|
||||
|
||||
ce = intel_context_pin(ctx, engine);
|
||||
ce = intel_context_instance(ctx, engine);
|
||||
if (IS_ERR(ce))
|
||||
return PTR_ERR(ce);
|
||||
|
||||
err = intel_context_pin(ce);
|
||||
intel_context_put(ce);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*out = ce;
|
||||
return 0;
|
||||
}
|
||||
|
@ -239,6 +239,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
|
||||
int id)
|
||||
{
|
||||
struct mock_engine *engine;
|
||||
int err;
|
||||
|
||||
GEM_BUG_ON(id >= I915_NUM_ENGINES);
|
||||
|
||||
@ -278,10 +279,15 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
|
||||
INIT_LIST_HEAD(&engine->hw_queue);
|
||||
|
||||
engine->base.kernel_context =
|
||||
intel_context_pin(i915->kernel_context, &engine->base);
|
||||
intel_context_instance(i915->kernel_context, &engine->base);
|
||||
if (IS_ERR(engine->base.kernel_context))
|
||||
goto err_breadcrumbs;
|
||||
|
||||
err = intel_context_pin(engine->base.kernel_context);
|
||||
intel_context_put(engine->base.kernel_context);
|
||||
if (err)
|
||||
goto err_breadcrumbs;
|
||||
|
||||
return &engine->base;
|
||||
|
||||
err_breadcrumbs:
|
||||
|
@ -1183,12 +1183,17 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
|
||||
INIT_LIST_HEAD(&s->workload_q_head[i]);
|
||||
s->shadow[i] = ERR_PTR(-EINVAL);
|
||||
|
||||
ce = intel_context_pin(ctx, engine);
|
||||
ce = intel_context_instance(ctx, engine);
|
||||
if (IS_ERR(ce)) {
|
||||
ret = PTR_ERR(ce);
|
||||
goto out_shadow_ctx;
|
||||
}
|
||||
|
||||
ret = intel_context_pin(ce);
|
||||
intel_context_put(ce);
|
||||
if (ret)
|
||||
goto out_shadow_ctx;
|
||||
|
||||
s->shadow[i] = ce;
|
||||
}
|
||||
|
||||
|
@ -2100,14 +2100,19 @@ static int eb_pin_context(struct i915_execbuffer *eb,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ce = intel_context_instance(eb->gem_context, engine);
|
||||
if (IS_ERR(ce))
|
||||
return PTR_ERR(ce);
|
||||
|
||||
/*
|
||||
* Pinning the contexts may generate requests in order to acquire
|
||||
* GGTT space, so do this first before we reserve a seqno for
|
||||
* ourselves.
|
||||
*/
|
||||
ce = intel_context_pin(eb->gem_context, engine);
|
||||
if (IS_ERR(ce))
|
||||
return PTR_ERR(ce);
|
||||
err = intel_context_pin(ce);
|
||||
intel_context_put(ce);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
eb->engine = engine;
|
||||
eb->context = ce;
|
||||
|
@ -1205,11 +1205,17 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
|
||||
{
|
||||
struct intel_engine_cs *engine = i915->engine[RCS0];
|
||||
struct intel_context *ce;
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
ret = i915_mutex_lock_interruptible(&i915->drm);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
ce = intel_context_instance(ctx, engine);
|
||||
if (IS_ERR(ce))
|
||||
return ce;
|
||||
|
||||
err = i915_mutex_lock_interruptible(&i915->drm);
|
||||
if (err) {
|
||||
intel_context_put(ce);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
/*
|
||||
* As the ID is the gtt offset of the context's vma we
|
||||
@ -1217,10 +1223,11 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
|
||||
*
|
||||
* NB: implied RCS engine...
|
||||
*/
|
||||
ce = intel_context_pin(ctx, engine);
|
||||
err = intel_context_pin(ce);
|
||||
mutex_unlock(&i915->drm.struct_mutex);
|
||||
if (IS_ERR(ce))
|
||||
return ce;
|
||||
intel_context_put(ce);
|
||||
if (err)
|
||||
return ERR_PTR(err);
|
||||
|
||||
i915->perf.oa.pinned_ctx = ce;
|
||||
|
||||
|
@ -785,6 +785,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
|
||||
struct drm_i915_private *i915 = engine->i915;
|
||||
struct intel_context *ce;
|
||||
struct i915_request *rq;
|
||||
int err;
|
||||
|
||||
/*
|
||||
* Preempt contexts are reserved for exclusive use to inject a
|
||||
@ -798,13 +799,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
|
||||
* GGTT space, so do this first before we reserve a seqno for
|
||||
* ourselves.
|
||||
*/
|
||||
ce = intel_context_pin(ctx, engine);
|
||||
ce = intel_context_instance(ctx, engine);
|
||||
if (IS_ERR(ce))
|
||||
return ERR_CAST(ce);
|
||||
|
||||
err = intel_context_pin(ce);
|
||||
if (err) {
|
||||
rq = ERR_PTR(err);
|
||||
goto err_put;
|
||||
}
|
||||
|
||||
rq = i915_request_create(ce);
|
||||
intel_context_unpin(ce);
|
||||
|
||||
err_put:
|
||||
intel_context_put(ce);
|
||||
return rq;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user