linux_dsm_epyc7002/drivers/gpu/drm/i915/selftests/intel_lrc.c

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_reset.h"
#include "../i915_selftest.h"

#include "igt_flush_test.h"
#include "igt_spinner.h"
#include "i915_random.h"
#include "mock_context.h"
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
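
/*
 * live_preempt: pin a minimum-priority spinner on each engine, then submit
 * a maximum-priority spinner and require it to start while the first is
 * still spinning, i.e. the scheduler must preempt the low priority context.
 */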
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
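
/*
 * live_late_preempt: submit two default-priority spinners and check the
 * second does not overtake the first; then raise the second request's
 * priority via engine->schedule() and check that preemption now occurs.
 */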
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}
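
/*
 * live_preempt_hang: inject a hang at the preemption point (via
 * execlists.preempt_hang), recover with a per-engine reset, and check that
 * the high priority spinner still runs afterwards.
 */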
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}
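
/* State shared by the preemption smoke tests below. */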
struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}
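
/*
 * Submit a single request from ctx at the given priority on smoke->engine,
 * optionally running the provided batch of arbitration points.
 */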
static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = i915_request_alloc(smoke->engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}
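
/*
 * Per-engine kthread: keep submitting requests from randomly chosen
 * contexts with a monotonically cycling priority until the selftest
 * timeout expires.
 */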
static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}
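
/*
 * Spawn one submission thread per engine (dropping struct_mutex so the
 * threads can take it around each submission) and tally the total number
 * of requests submitted.
 */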
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		/* Pass each thread its own per-engine state, not the array base */
		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;
		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return 0;
}
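
/*
 * Single-threaded variant: walk every engine, submitting from random
 * contexts at random priorities until the selftest timeout expires.
 */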
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return 0;
}
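
/*
 * live_preempt_smoke: fill a batch with MI_ARB_CHECK arbitration points,
 * create a large pool of contexts and bombard the engines with requests of
 * varying priority, both with and without the batch, to stress preemption.
 */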
static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(smoke.i915);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(smoke.batch);

	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
	if (err)
		goto err_batch;

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n]) {
			err = -ENOMEM;
			goto err_ctx;
		}
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(smoke.i915, wakeref);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}

int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}