drm/i915/selftests: Remove live_suppress_wait_preempt

With the removal of the internal wait-priority boosting, we can also
remove the selftest that ensured those waits were being suppressed
from causing preemptions.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200607222108.14401-4-chris@chris-wilson.co.uk
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -2379,183 +2379,6 @@ static int live_suppress_self_preempt(void *arg)
 	goto err_client_b;
 }
 
-static int __i915_sw_fence_call
-dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
-{
-	return NOTIFY_DONE;
-}
-
-static struct i915_request *dummy_request(struct intel_engine_cs *engine)
-{
-	struct i915_request *rq;
-
-	rq = kzalloc(sizeof(*rq), GFP_KERNEL);
-	if (!rq)
-		return NULL;
-
-	rq->engine = engine;
-
-	spin_lock_init(&rq->lock);
-	INIT_LIST_HEAD(&rq->fence.cb_list);
-	rq->fence.lock = &rq->lock;
-	rq->fence.ops = &i915_fence_ops;
-
-	i915_sched_node_init(&rq->sched);
-
-	/* mark this request as permanently incomplete */
-	rq->fence.seqno = 1;
-	BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
-	rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
-	GEM_BUG_ON(i915_request_completed(rq));
-
-	i915_sw_fence_init(&rq->submit, dummy_notify);
-	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
-
-	spin_lock_init(&rq->lock);
-	rq->fence.lock = &rq->lock;
-	INIT_LIST_HEAD(&rq->fence.cb_list);
-
-	return rq;
-}
-
-static void dummy_request_free(struct i915_request *dummy)
-{
-	/* We have to fake the CS interrupt to kick the next request */
-	i915_sw_fence_commit(&dummy->submit);
-
-	i915_request_mark_complete(dummy);
-	dma_fence_signal(&dummy->fence);
-
-	i915_sched_node_fini(&dummy->sched);
-	i915_sw_fence_fini(&dummy->submit);
-
-	dma_fence_free(&dummy->fence);
-}
-
-static int live_suppress_wait_preempt(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct preempt_client client[4];
-	struct i915_request *rq[ARRAY_SIZE(client)] = {};
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = -ENOMEM;
-	int i;
-
-	/*
-	 * Waiters are given a little priority nudge, but not enough
-	 * to actually cause any preemption. Double check that we do
-	 * not needlessly generate preempt-to-idle cycles.
-	 */
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	if (preempt_client_init(gt, &client[0])) /* ELSP[0] */
-		return -ENOMEM;
-	if (preempt_client_init(gt, &client[1])) /* ELSP[1] */
-		goto err_client_0;
-	if (preempt_client_init(gt, &client[2])) /* head of queue */
-		goto err_client_1;
-	if (preempt_client_init(gt, &client[3])) /* bystander */
-		goto err_client_2;
-
-	for_each_engine(engine, gt, id) {
-		int depth;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		if (!engine->emit_init_breadcrumb)
-			continue;
-
-		for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
-			struct i915_request *dummy;
-
-			engine->execlists.preempt_hang.count = 0;
-
-			dummy = dummy_request(engine);
-			if (!dummy)
-				goto err_client_3;
-
-			for (i = 0; i < ARRAY_SIZE(client); i++) {
-				struct i915_request *this;
-
-				this = spinner_create_request(&client[i].spin,
-							      client[i].ctx, engine,
-							      MI_NOOP);
-				if (IS_ERR(this)) {
-					err = PTR_ERR(this);
-					goto err_wedged;
-				}
-
-				/* Disable NEWCLIENT promotion */
-				__i915_active_fence_set(&i915_request_timeline(this)->last_request,
-							&dummy->fence);
-
-				rq[i] = i915_request_get(this);
-				i915_request_add(this);
-			}
-
-			dummy_request_free(dummy);
-
-			GEM_BUG_ON(i915_request_completed(rq[0]));
-			if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
-				pr_err("%s: First client failed to start\n",
-				       engine->name);
-				goto err_wedged;
-			}
-			GEM_BUG_ON(!i915_request_started(rq[0]));
-
-			if (i915_request_wait(rq[depth],
-					      I915_WAIT_PRIORITY,
-					      1) != -ETIME) {
-				pr_err("%s: Waiter depth:%d completed!\n",
-				       engine->name, depth);
-				goto err_wedged;
-			}
-
-			for (i = 0; i < ARRAY_SIZE(client); i++) {
-				igt_spinner_end(&client[i].spin);
-				i915_request_put(rq[i]);
-				rq[i] = NULL;
-			}
-
-			if (igt_flush_test(gt->i915))
-				goto err_wedged;
-
-			if (engine->execlists.preempt_hang.count) {
-				pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
-				       engine->name,
-				       engine->execlists.preempt_hang.count,
-				       depth);
-				err = -EINVAL;
-				goto err_client_3;
-			}
-		}
-	}
-
-	err = 0;
-err_client_3:
-	preempt_client_fini(&client[3]);
-err_client_2:
-	preempt_client_fini(&client[2]);
-err_client_1:
-	preempt_client_fini(&client[1]);
-err_client_0:
-	preempt_client_fini(&client[0]);
-	return err;
-
-err_wedged:
-	for (i = 0; i < ARRAY_SIZE(client); i++) {
-		igt_spinner_end(&client[i].spin);
-		i915_request_put(rq[i]);
-	}
-	intel_gt_set_wedged(gt);
-	err = -EIO;
-	goto err_client_3;
-}
-
 static int live_chain_preempt(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -4592,7 +4415,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_nopreempt),
 		SUBTEST(live_preempt_cancel),
 		SUBTEST(live_suppress_self_preempt),
-		SUBTEST(live_suppress_wait_preempt),
 		SUBTEST(live_chain_preempt),
 		SUBTEST(live_preempt_gang),
 		SUBTEST(live_preempt_timeout),