/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_reset.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_gem_utils.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_context.h"

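/*
 * Submit a trivial spinning request on each engine, check that it starts
 * executing, then retire it: a basic sanity check of execlists submission
 * before the preemption tests below.
 */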
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_busywait_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	u32 *map;

	/*
	 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
	 * preempt the busywaits used to synchronise between rings.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_unlock;
	ctx_hi->sched.priority = INT_MAX;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = INT_MIN;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_ctx_lo;
	}

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_map;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_map;

	for_each_engine(engine, i915, id) {
		struct i915_request *lo, *hi;
		struct igt_live_test t;
		u32 *cs;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_vma;
		}

		/*
		 * We create two requests. The low priority request
		 * busywaits on a semaphore (inside the ringbuffer where
		 * it should be preemptible) and the high priority request
		 * uses a MI_STORE_DWORD_IMM to update the semaphore value
		 * allowing the first request to complete. If preemption
		 * fails, we hang instead.
		 */

		lo = igt_request_alloc(ctx_lo, engine);
		if (IS_ERR(lo)) {
			err = PTR_ERR(lo);
			goto err_vma;
		}

		cs = intel_ring_begin(lo, 8);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(lo);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 1;

		/* XXX Do we need a flush + invalidate here? */

		*cs++ = MI_SEMAPHORE_WAIT |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_POLL |
			MI_SEMAPHORE_SAD_EQ_SDD;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;

		intel_ring_advance(lo, cs);
		i915_request_add(lo);

		if (wait_for(READ_ONCE(*map), 10)) {
			err = -ETIMEDOUT;
			goto err_vma;
		}

		/* Low priority request should be busywaiting now */
		if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
			pr_err("%s: Busywaiting request did not busywait!\n",
			       engine->name);
			err = -EIO;
			goto err_vma;
		}

		hi = igt_request_alloc(ctx_hi, engine);
		if (IS_ERR(hi)) {
			err = PTR_ERR(hi);
			goto err_vma;
		}

		cs = intel_ring_begin(hi, 4);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(hi);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 0;

		intel_ring_advance(hi, cs);
		i915_request_add(hi);

		if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
			struct drm_printer p = drm_info_printer(i915->drm.dev);

			pr_err("%s: Failed to preempt semaphore busywait!\n",
			       engine->name);

			intel_engine_dump(engine, &p, "%s\n", engine->name);
			GEM_TRACE_DUMP();

			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_vma;
		}
		GEM_BUG_ON(READ_ONCE(*map));

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_vma;
		}
	}

	err = 0;
err_vma:
	i915_vma_unpin(vma);
err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

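/*
 * Start a low priority spinner on each engine, then submit a spinner from
 * a maximum priority context and check that it executes while the low
 * priority spinner is still spinning, i.e. that it was preempted in.
 */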
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
		pr_err("Logical preemption supported, but not exposed\n");

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct igt_live_test t;
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

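/*
 * As live_preempt, but both contexts start at default priority; the boost
 * is applied via engine->schedule() only after the second spinner is
 * already queued, checking that a late priority bump alone triggers
 * preemption.
 */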
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct igt_live_test t;
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}

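/* A context bundled with its own spinner, for juggling multiple clients. */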
struct preempt_client {
	struct igt_spinner spin;
	struct i915_gem_context *ctx;
};

static int preempt_client_init(struct drm_i915_private *i915,
			       struct preempt_client *c)
{
	c->ctx = kernel_context(i915);
	if (!c->ctx)
		return -ENOMEM;

	if (igt_spinner_init(&c->spin, i915))
		goto err_ctx;

	return 0;

err_ctx:
	kernel_context_close(c->ctx);
	return -ENOMEM;
}

static void preempt_client_fini(struct preempt_client *c)
{
	igt_spinner_fini(&c->spin);
	kernel_context_close(c->ctx);
}

static int live_suppress_self_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
	};
	struct preempt_client a, b;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Verify that if a preemption request does not cause a change in
	 * the current execution order, the preempt-to-idle injection is
	 * skipped and that we do not accidentally apply it after the CS
	 * completion event.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (USES_GUC_SUBMISSION(i915))
		return 0; /* presume black box */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &a))
		goto err_unlock;
	if (preempt_client_init(i915, &b))
		goto err_client_a;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq_a, *rq_b;
		int depth;

		if (!intel_engine_has_preemption(engine))
			continue;

		engine->execlists.preempt_hang.count = 0;

		rq_a = igt_spinner_create_request(&a.spin,
						  a.ctx, engine,
						  MI_NOOP);
		if (IS_ERR(rq_a)) {
			err = PTR_ERR(rq_a);
			goto err_client_b;
		}

		i915_request_add(rq_a);
		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
			pr_err("First client failed to start\n");
			goto err_wedged;
		}

		for (depth = 0; depth < 8; depth++) {
			rq_b = igt_spinner_create_request(&b.spin,
							  b.ctx, engine,
							  MI_NOOP);
			if (IS_ERR(rq_b)) {
				err = PTR_ERR(rq_b);
				goto err_client_b;
			}
			i915_request_add(rq_b);

			GEM_BUG_ON(i915_request_completed(rq_a));
			engine->schedule(rq_a, &attr);
			igt_spinner_end(&a.spin);

			if (!igt_wait_for_spinner(&b.spin, rq_b)) {
				pr_err("Second client failed to start\n");
				goto err_wedged;
			}

			swap(a, b);
			rq_a = rq_b;
		}
		igt_spinner_end(&a.spin);

		if (engine->execlists.preempt_hang.count) {
			pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
			       engine->execlists.preempt_hang.count,
			       depth);
			err = -EINVAL;
			goto err_client_b;
		}

		if (igt_flush_test(i915, I915_WAIT_LOCKED))
			goto err_wedged;
	}

	err = 0;
err_client_b:
	preempt_client_fini(&b);
err_client_a:
	preempt_client_fini(&a);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&b.spin);
	igt_spinner_end(&a.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_b;
}

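/*
 * Helpers for live_suppress_wait_preempt: dummy_request() builds a bare,
 * never-submitted request that can never complete (its hwsp_seqno points
 * at the always-zero upper dword of its 64b fence.seqno), so it can stand
 * in as a timeline's last_request and disable NEWCLIENT promotion.
 */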
static int __i915_sw_fence_call
dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

static struct i915_request *dummy_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = kzalloc(sizeof(*rq), GFP_KERNEL);
	if (!rq)
		return NULL;

	INIT_LIST_HEAD(&rq->active_list);
	rq->engine = engine;

	i915_sched_node_init(&rq->sched);

	/* mark this request as permanently incomplete */
	rq->fence.seqno = 1;
	BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
	GEM_BUG_ON(i915_request_completed(rq));

	i915_sw_fence_init(&rq->submit, dummy_notify);
	i915_sw_fence_commit(&rq->submit);

	return rq;
}

static void dummy_request_free(struct i915_request *dummy)
{
	i915_request_mark_complete(dummy);
	i915_sched_node_fini(&dummy->sched);
	i915_sw_fence_fini(&dummy->submit);

	dma_fence_free(&dummy->fence);
}

static int live_suppress_wait_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct preempt_client client[4];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	int i;

	/*
	 * Waiters are given a little priority nudge, but not enough
	 * to actually cause any preemption. Double check that we do
	 * not needlessly generate preempt-to-idle cycles.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
		goto err_unlock;
	if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
		goto err_client_0;
	if (preempt_client_init(i915, &client[2])) /* head of queue */
		goto err_client_1;
	if (preempt_client_init(i915, &client[3])) /* bystander */
		goto err_client_2;

	for_each_engine(engine, i915, id) {
		int depth;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (!engine->emit_init_breadcrumb)
			continue;

		for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
			struct i915_request *rq[ARRAY_SIZE(client)];
			struct i915_request *dummy;

			engine->execlists.preempt_hang.count = 0;

			dummy = dummy_request(engine);
			if (!dummy)
				goto err_client_3;

			for (i = 0; i < ARRAY_SIZE(client); i++) {
				rq[i] = igt_spinner_create_request(&client[i].spin,
								   client[i].ctx, engine,
								   MI_NOOP);
				if (IS_ERR(rq[i])) {
					err = PTR_ERR(rq[i]);
					goto err_wedged;
				}

				/* Disable NEWCLIENT promotion */
				__i915_active_request_set(&rq[i]->timeline->last_request,
							  dummy);
				i915_request_add(rq[i]);
			}

			dummy_request_free(dummy);

			GEM_BUG_ON(i915_request_completed(rq[0]));
			if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
				pr_err("%s: First client failed to start\n",
				       engine->name);
				goto err_wedged;
			}
			GEM_BUG_ON(!i915_request_started(rq[0]));

			if (i915_request_wait(rq[depth],
					      I915_WAIT_LOCKED |
					      I915_WAIT_PRIORITY,
					      1) != -ETIME) {
				pr_err("%s: Waiter depth:%d completed!\n",
				       engine->name, depth);
				goto err_wedged;
			}

			for (i = 0; i < ARRAY_SIZE(client); i++)
				igt_spinner_end(&client[i].spin);

			if (igt_flush_test(i915, I915_WAIT_LOCKED))
				goto err_wedged;

			if (engine->execlists.preempt_hang.count) {
				pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
				       engine->name,
				       engine->execlists.preempt_hang.count,
				       depth);
				err = -EINVAL;
				goto err_client_3;
			}
		}
	}

	err = 0;
err_client_3:
	preempt_client_fini(&client[3]);
err_client_2:
	preempt_client_fini(&client[2]);
err_client_1:
	preempt_client_fini(&client[1]);
err_client_0:
	preempt_client_fini(&client[0]);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	for (i = 0; i < ARRAY_SIZE(client); i++)
		igt_spinner_end(&client[i].spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_3;
}

static int live_chain_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct preempt_client hi, lo;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Build a chain AB...BA between two contexts (A, B) and request
	 * preemption of the last request. It should then complete before
	 * the previously submitted spinner in B.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &hi))
		goto err_unlock;

	if (preempt_client_init(i915, &lo))
		goto err_client_hi;

	for_each_engine(engine, i915, id) {
		struct i915_sched_attr attr = {
			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
		};
		struct igt_live_test t;
		struct i915_request *rq;
		int ring_size, count, i;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&lo.spin,
						lo.ctx, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq))
			goto err_wedged;
		i915_request_add(rq);

		ring_size = rq->wa_tail - rq->head;
		if (ring_size < 0)
			ring_size += rq->ring->size;
		ring_size = rq->ring->size / ring_size;
		pr_debug("%s(%s): Using maximum of %d requests\n",
			 __func__, engine->name, ring_size);

		igt_spinner_end(&lo.spin);
		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
			pr_err("Timed out waiting to flush %s\n", engine->name);
			goto err_wedged;
		}

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_wedged;
		}

		for_each_prime_number_from(count, 1, ring_size) {
			rq = igt_spinner_create_request(&hi.spin,
							hi.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (!igt_wait_for_spinner(&hi.spin, rq))
				goto err_wedged;

			rq = igt_spinner_create_request(&lo.spin,
							lo.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);

			for (i = 0; i < count; i++) {
				rq = igt_request_alloc(lo.ctx, engine);
				if (IS_ERR(rq))
					goto err_wedged;
				i915_request_add(rq);
			}

			rq = igt_request_alloc(hi.ctx, engine);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			engine->schedule(rq, &attr);

			igt_spinner_end(&hi.spin);
			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to preempt over chain of %d\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
			igt_spinner_end(&lo.spin);

			rq = igt_request_alloc(lo.ctx, engine);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to flush low priority chain of %d requests\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
		}

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_wedged;
		}
	}

	err = 0;
err_client_lo:
	preempt_client_fini(&lo);
err_client_hi:
	preempt_client_fini(&hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&hi.spin);
	igt_spinner_end(&lo.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_lo;
}

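/*
 * Ask the backend to inject a hang instead of completing preempt-to-idle
 * (preempt_hang.inject_hang), wait for the injected hang to trigger,
 * perform an engine reset by hand and verify the high priority spinner
 * still runs afterwards.
 */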
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}

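/*
 * Preemption smoke testing: fire minimal requests from a large pool of
 * contexts at varying priorities across all engines, both from per-engine
 * threads ramping priority (smoke_crescendo) and single threaded with
 * random priorities (smoke_random), with and without a batch buffer.
 */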
struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}

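/*
 * Submit a single request from @ctx at priority @prio, optionally running
 * @batch; the common building block for both smoke submitters below.
 */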
static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = igt_request_alloc(ctx, smoke->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}

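/*
 * One submitting thread per engine, each cycling its context priority
 * through 0..I915_PRIORITY_MAX to generate a crescendo of preemption
 * requests in parallel.
 */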
static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}

static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		/* each thread must receive its own per-engine argument */
		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;

		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
	return err; /* propagate any error reported by the threads */
}

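/* Single threaded variant: a random priority for every request. */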
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
	return 0;
}

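/*
 * Top level smoke test: build a page sized batch of MI_ARB_CHECK
 * terminated by MI_BATCH_BUFFER_END, create 1024 contexts and run both
 * submitters over them, once without and once with the batch (BATCH phase).
 */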
static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(smoke.i915);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
	i915_gem_object_flush_map(smoke.batch);
	i915_gem_object_unpin_map(smoke.batch);

	if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
		err = -EIO;
		goto err_batch;
	}

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n])
			goto err_ctx;
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_live_test_end(&t))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(smoke.i915, wakeref);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}

int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_busywait_preempt),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_suppress_self_preempt),
		SUBTEST(live_suppress_wait_preempt),
		SUBTEST(live_chain_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(i915))
		return 0;

	return i915_subtests(tests, i915);
}