/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_reset.h"

#include "../i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_spinner.h"
#include "i915_random.h"

#include "mock_context.h"
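
/*
 * Sanity check the submission backend: on each engine, submit a single
 * spinning request, wait for it to begin executing, then quiesce. If this
 * fails, there is no point in running the preemption tests below.
 */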
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
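
/*
 * Start a spinner in a minimum-priority context, then submit a second
 * spinner from a maximum-priority context and verify, on every engine,
 * that the high-priority request preempts the still-spinning
 * low-priority one.
 */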
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
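
/*
 * As live_preempt, but both contexts start at the default priority, so
 * the second spinner initially queues behind the first. Its priority is
 * then raised via engine->schedule() and we check that the late bump
 * alone is enough to trigger preemption.
 */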
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}
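
/*
 * Inject a hang while the engine is switching to the preempting context
 * (via execlists.preempt_hang.inject_hang), perform an engine reset, and
 * verify that the high-priority spinner still runs afterwards.
 */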
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}
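
/*
 * Shared state for the preemption smoketests: a pool of contexts to pick
 * from at random, the engine currently being flooded, an optional batch
 * to execute, and a PRNG for choosing contexts and priorities.
 */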
struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}
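
/*
 * Submit a single request from @ctx at priority @prio on smoke->engine,
 * optionally executing @batch (filled with MI_ARB_CHECK arbitration
 * points by live_preempt_smoke, giving the scheduler frequent
 * opportunities to preempt mid-batch).
 */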
static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = i915_request_alloc(smoke->engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}
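
/*
 * Worker for smoke_crescendo(): flood a single engine with requests
 * whose priority cycles upwards with each submission, until the timeout
 * expires, recording how many requests were submitted.
 */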
static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}
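
/*
 * Run one crescendo worker per engine in parallel, so that every engine
 * is continually being preempted, then stop the workers and report the
 * total number of requests submitted.
 */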
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		/* Each worker needs its own per-engine state, &arg[id]. */
		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;

		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return err;
}
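
/*
 * Single-threaded variant: walk the engines round-robin, submitting each
 * request at a random priority, until the timeout expires.
 */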
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return 0;
}
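
/*
 * Preemption smoketest: build a batch full of MI_ARB_CHECK arbitration
 * points, create a pool of 1024 contexts, then drive the crescendo and
 * random submission patterns, both with and without the batch (the BATCH
 * phase), to stress preemption under sustained load.
 */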
static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(smoke.i915);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(smoke.batch);

	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
	if (err)
		goto err_batch;

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n]) {
			err = -ENOMEM;
			goto err_ctx;
		}
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(smoke.i915, wakeref);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}

int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}