drm/i915/perf: Assert locking for i915_init_oa_perf_state()

We use the context->pin_mutex to serialise updates to the OA config and
the registers values written into each new context. Document this
relationship and assert we do hold the context->pin_mutex as used by
gen8_configure_all_contexts() to serialise updates to the OA config
itself.

v2: Add a white-lie for when we call intel_gt_resume() from init.
v3: Lie while we have the context pinned inside atomic reset.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com> #v1
Link: https://patchwork.freedesktop.org/patch/msgid/20190830181929.18663-1-chris@chris-wilson.co.uk
Commit: dffa8feb30
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2019-08-30 19:19:29 +01:00
Parent: 3d1da92baf
3 changed files with 19 additions and 1 deletion

View File

@@ -6,6 +6,7 @@
 #include "i915_drv.h"
 #include "i915_params.h"
+#include "intel_context.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
@@ -142,8 +143,12 @@ int intel_gt_resume(struct intel_gt *gt)
 		intel_engine_pm_get(engine);
 		ce = engine->kernel_context;
-		if (ce)
+		if (ce) {
+			GEM_BUG_ON(!intel_context_is_pinned(ce));
+			mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
 			ce->ops->reset(ce);
+			mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
+		}
 		engine->serial++; /* kernel context lost */
 		err = engine->resume(engine);

View File

@@ -2383,6 +2383,11 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	ce = rq->hw_context;
 	GEM_BUG_ON(i915_active_is_idle(&ce->active));
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+	/* Proclaim we have exclusive access to the context image! */
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+	mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_);
+
 	rq = active_request(rq);
 	if (!rq) {
 		ce->ring->head = ce->ring->tail;
@@ -2442,6 +2447,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 		  engine->name, ce->ring->head, ce->ring->tail);
 	intel_ring_update_space(ce->ring);
 	__execlists_update_reg_state(ce, engine);
+	mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
 
 unwind:
 	/* Push back any incomplete requests for replay after the reset. */
@@ -3920,6 +3926,9 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
 			   u32 head,
 			   bool scrub)
 {
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+	mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_);
+
 	/*
 	 * We want a simple context + ring to execute the breadcrumb update.
 	 * We cannot rely on the context being intact across the GPU hang,
@@ -3944,6 +3953,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
 	intel_ring_update_space(ce->ring);
 	__execlists_update_reg_state(ce, engine);
+	mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

View File

@@ -2305,6 +2305,9 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine,
 {
 	struct i915_perf_stream *stream;
 
+	/* perf.exclusive_stream serialised by gen8_configure_all_contexts() */
+	lockdep_assert_held(&ce->pin_mutex);
+
 	if (engine->class != RENDER_CLASS)
 		return;