drm/i915: Pass intel_context to intel_context_pin_lock()

Move the intel_context_instance() to the caller so that we can decouple
ourselves from one context instance per engine.

v2: Rename pin_lock() to lock_pinned(), hopefully that is clearer.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190426163336.15906-5-chris@chris-wilson.co.uk
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2019-04-26 17:33:32 +01:00
commit 6b736de574
parent 1b1ae40721
4 changed files with 82 additions and 72 deletions

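As a rough illustration of the change in calling convention (this sketch is not part of the commit; the error handling simply mirrors what get_sseu() does in the i915_gem_context.c hunks below):

	/* Before: lookup, reference and lock were fused into one helper. */
	ce = intel_context_pin_lock(ctx, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);
	/* ... inspect or modify ce->sseu under the lock ... */
	intel_context_pin_unlock(ce);

	/* After: the caller resolves the intel_context first, then locks it. */
	ce = intel_context_instance(ctx, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce);	/* interruptible, may fail */
	if (err) {
		intel_context_put(ce);
		return err;
	}
	/* ... inspect or modify ce->sseu under the lock ... */
	intel_context_unlock_pinned(ce);
	intel_context_put(ce);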

@@ -128,32 +128,6 @@ intel_context_instance(struct i915_gem_context *ctx,
 	return intel_context_get(pos);
 }
 
-struct intel_context *
-intel_context_pin_lock(struct i915_gem_context *ctx,
-		       struct intel_engine_cs *engine)
-	__acquires(ce->pin_mutex)
-{
-	struct intel_context *ce;
-
-	ce = intel_context_instance(ctx, engine);
-	if (IS_ERR(ce))
-		return ce;
-
-	if (mutex_lock_interruptible(&ce->pin_mutex)) {
-		intel_context_put(ce);
-		return ERR_PTR(-EINTR);
-	}
-
-	return ce;
-}
-
-void intel_context_pin_unlock(struct intel_context *ce)
-	__releases(ce->pin_mutex)
-{
-	mutex_unlock(&ce->pin_mutex);
-	intel_context_put(ce);
-}
-
 int __intel_context_do_pin(struct intel_context *ce)
 {
 	int err;


@@ -31,25 +31,45 @@ intel_context_lookup(struct i915_gem_context *ctx,
 		     struct intel_engine_cs *engine);
 
 /**
- * intel_context_pin_lock - Stablises the 'pinned' status of the HW context
- * @ctx - the parent GEM context
- * @engine - the target HW engine
+ * intel_context_lock_pinned - Stablises the 'pinned' status of the HW context
+ * @ce - the context
  *
  * Acquire a lock on the pinned status of the HW context, such that the context
  * can neither be bound to the GPU or unbound whilst the lock is held, i.e.
  * intel_context_is_pinned() remains stable.
  */
-struct intel_context *
-intel_context_pin_lock(struct i915_gem_context *ctx,
-		       struct intel_engine_cs *engine);
+static inline int intel_context_lock_pinned(struct intel_context *ce)
+	__acquires(ce->pin_mutex)
+{
+	return mutex_lock_interruptible(&ce->pin_mutex);
+}
 
+/**
+ * intel_context_is_pinned - Reports the 'pinned' status
+ * @ce - the context
+ *
+ * While in use by the GPU, the context, along with its ring and page
+ * tables is pinned into memory and the GTT.
+ *
+ * Returns: true if the context is currently pinned for use by the GPU.
+ */
 static inline bool
 intel_context_is_pinned(struct intel_context *ce)
 {
 	return atomic_read(&ce->pin_count);
 }
 
-void intel_context_pin_unlock(struct intel_context *ce);
+/**
+ * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
+ * @ce - the context
+ *
+ * Releases the lock earlier acquired by intel_context_unlock_pinned().
+ */
+static inline void intel_context_unlock_pinned(struct intel_context *ce)
+	__releases(ce->pin_mutex)
+{
+	mutex_unlock(&ce->pin_mutex);
+}
 
 struct intel_context *
 __intel_context_insert(struct i915_gem_context *ctx,

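A hedged usage sketch of the new helpers (hypothetical caller, not taken from the commit): while the pin mutex is held, intel_context_is_pinned() cannot change, so a caller can choose between editing the saved context state directly and routing an update through a request, which is what the sseu reconfiguration path below relies on.

	err = intel_context_lock_pinned(ce);	/* interruptible */
	if (err)
		return err;

	if (!intel_context_is_pinned(ce)) {
		/* Idle: the saved context image can be updated directly. */
		ce->sseu = sseu;
	} else {
		/* In use by the GPU: apply the change via a request instead. */
	}

	intel_context_unlock_pinned(ce);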

@@ -141,6 +141,18 @@ static void lut_close(struct i915_gem_context *ctx)
 	rcu_read_unlock();
 }
 
+static struct intel_context *
+lookup_user_engine(struct i915_gem_context *ctx, u16 class, u16 instance)
+{
+	struct intel_engine_cs *engine;
+
+	engine = intel_engine_lookup_user(ctx->i915, class, instance);
+	if (!engine)
+		return ERR_PTR(-EINVAL);
+
+	return intel_context_instance(ctx, engine);
+}
+
 static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
 {
 	unsigned int max;
@@ -1132,19 +1144,17 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 }
 
 static int
-__i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
-				    struct intel_engine_cs *engine,
-				    struct intel_sseu sseu)
+__intel_context_reconfigure_sseu(struct intel_context *ce,
+				 struct intel_sseu sseu)
 {
-	struct intel_context *ce;
-	int ret = 0;
+	int ret;
 
-	GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
-	GEM_BUG_ON(engine->id != RCS0);
+	GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
+	GEM_BUG_ON(ce->engine->id != RCS0);
 
-	ce = intel_context_pin_lock(ctx, engine);
-	if (IS_ERR(ce))
-		return PTR_ERR(ce);
+	ret = intel_context_lock_pinned(ce);
+	if (ret)
+		return ret;
 
 	/* Nothing to do if unmodified. */
 	if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
@@ -1155,24 +1165,23 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
 	ce->sseu = sseu;
 
 unlock:
-	intel_context_pin_unlock(ce);
+	intel_context_unlock_pinned(ce);
 	return ret;
 }
 
 static int
-i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
-				  struct intel_engine_cs *engine,
-				  struct intel_sseu sseu)
+intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
 {
+	struct drm_i915_private *i915 = ce->gem_context->i915;
 	int ret;
 
-	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
 	if (ret)
 		return ret;
 
-	ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
-	mutex_unlock(&ctx->i915->drm.struct_mutex);
+	ret = __intel_context_reconfigure_sseu(ce, sseu);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 	return ret;
 }
 
@@ -1280,7 +1289,7 @@ static int set_sseu(struct i915_gem_context *ctx,
 {
 	struct drm_i915_private *i915 = ctx->i915;
 	struct drm_i915_gem_context_param_sseu user_sseu;
-	struct intel_engine_cs *engine;
+	struct intel_context *ce;
 	struct intel_sseu sseu;
 	int ret;
 
@@ -1297,27 +1306,31 @@ static int set_sseu(struct i915_gem_context *ctx,
 	if (user_sseu.flags || user_sseu.rsvd)
 		return -EINVAL;
 
-	engine = intel_engine_lookup_user(i915,
-					  user_sseu.engine.engine_class,
-					  user_sseu.engine.engine_instance);
-	if (!engine)
-		return -EINVAL;
+	ce = lookup_user_engine(ctx,
+				user_sseu.engine.engine_class,
+				user_sseu.engine.engine_instance);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
 
 	/* Only render engine supports RPCS configuration. */
-	if (engine->class != RENDER_CLASS)
-		return -ENODEV;
+	if (ce->engine->class != RENDER_CLASS) {
+		ret = -ENODEV;
+		goto out_ce;
+	}
 
 	ret = user_to_context_sseu(i915, &user_sseu, &sseu);
 	if (ret)
-		return ret;
+		goto out_ce;
 
-	ret = i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+	ret = intel_context_reconfigure_sseu(ce, sseu);
 	if (ret)
-		return ret;
+		goto out_ce;
 
 	args->size = sizeof(user_sseu);
 
-	return 0;
+out_ce:
+	intel_context_put(ce);
+	return ret;
 }
 
 static int ctx_setparam(struct drm_i915_file_private *fpriv,
@@ -1522,8 +1535,8 @@ static int get_sseu(struct i915_gem_context *ctx,
 			struct drm_i915_gem_context_param *args)
 {
 	struct drm_i915_gem_context_param_sseu user_sseu;
-	struct intel_engine_cs *engine;
 	struct intel_context *ce;
+	int err;
 
 	if (args->size == 0)
 		goto out;
@@ -1537,22 +1550,25 @@ static int get_sseu(struct i915_gem_context *ctx,
 	if (user_sseu.flags || user_sseu.rsvd)
 		return -EINVAL;
 
-	engine = intel_engine_lookup_user(ctx->i915,
-					  user_sseu.engine.engine_class,
-					  user_sseu.engine.engine_instance);
-	if (!engine)
-		return -EINVAL;
-
-	ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
+	ce = lookup_user_engine(ctx,
+				user_sseu.engine.engine_class,
+				user_sseu.engine.engine_instance);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
+	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
+	if (err) {
+		intel_context_put(ce);
+		return err;
+	}
+
 	user_sseu.slice_mask = ce->sseu.slice_mask;
 	user_sseu.subslice_mask = ce->sseu.subslice_mask;
 	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
 	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
 
-	intel_context_pin_unlock(ce);
+	intel_context_unlock_pinned(ce);
+	intel_context_put(ce);
 
 	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
 			 sizeof(user_sseu)))


@@ -1017,7 +1017,7 @@ __sseu_test(struct drm_i915_private *i915,
 	if (ret)
 		return ret;
 
-	ret = __i915_gem_context_reconfigure_sseu(ce->gem_context, ce->engine, sseu);
+	ret = __intel_context_reconfigure_sseu(ce, sseu);
 	if (ret)
 		goto out_spin;