linux_dsm_epyc7002/drivers/gpu/drm/i915/gt/selftest_lrc.c


/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*/
#include <linux/prime_numbers.h>
#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_gem_utils.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_context.h"
static int live_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err = -ENOMEM;
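/*
 * Smoke test the execlists submission path: on each engine, submit a
 * spinning request, check that it starts executing, then terminate the
 * spin and flush before moving on to the next engine.
 */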
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin, i915))
goto err_unlock;
ctx = kernel_context(i915);
if (!ctx)
goto err_spin;
for_each_engine(engine, i915, id) {
struct i915_request *rq;
rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx;
}
igt_spinner_end(&spin);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx;
}
}
err = 0;
err_ctx:
kernel_context_close(ctx);
err_spin:
igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static int live_busywait_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = -ENOMEM;
u32 *map;
/*
* Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
* preempt the busywaits used to synchronise between rings.
*/
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
ctx_hi = kernel_context(i915);
if (!ctx_hi)
goto err_unlock;
ctx_hi->sched.priority = INT_MAX;
ctx_lo = kernel_context(i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority = INT_MIN;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_ctx_lo;
}
map = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto err_obj;
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_map;
}
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
if (err)
goto err_map;
for_each_engine(engine, i915, id) {
struct i915_request *lo, *hi;
struct igt_live_test t;
u32 *cs;
if (!intel_engine_can_store_dword(engine))
continue;
if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
err = -EIO;
goto err_vma;
}
/*
* We create two requests. The low priority request
* busywaits on a semaphore (inside the ringbuffer where
* it should be preemptible) and the high priority request
* uses a MI_STORE_DWORD_IMM to update the semaphore value,
* allowing the first request to complete. If preemption
* fails, we hang instead.
*/
lo = igt_request_alloc(ctx_lo, engine);
if (IS_ERR(lo)) {
err = PTR_ERR(lo);
goto err_vma;
}
cs = intel_ring_begin(lo, 8);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
i915_request_add(lo);
goto err_vma;
}
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(vma);
*cs++ = 0;
*cs++ = 1;
/* XXX Do we need a flush + invalidate here? */
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma);
*cs++ = 0;
intel_ring_advance(lo, cs);
i915_request_add(lo);
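/* Wait for the low priority request to write the initial semaphore value */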
if (wait_for(READ_ONCE(*map), 10)) {
err = -ETIMEDOUT;
goto err_vma;
}
/* Low priority request should be busywaiting now */
if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
pr_err("%s: Busywaiting request did not!\n",
engine->name);
err = -EIO;
goto err_vma;
}
hi = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(hi)) {
err = PTR_ERR(hi);
goto err_vma;
}
cs = intel_ring_begin(hi, 4);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
i915_request_add(hi);
goto err_vma;
}
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(vma);
*cs++ = 0;
*cs++ = 0;
intel_ring_advance(hi, cs);
i915_request_add(hi);
if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to preempt semaphore busywait!\n",
engine->name);
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
err = -EIO;
goto err_vma;
}
GEM_BUG_ON(READ_ONCE(*map));
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_vma;
}
}
err = 0;
err_vma:
i915_vma_unpin(vma);
err_map:
i915_gem_object_unpin_map(obj);
err_obj:
i915_gem_object_put(obj);
err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = -ENOMEM;
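/*
 * Submit a spinner from a minimum-priority context, then a spinner from
 * a maximum-priority context on the same engine. The second spinner can
 * only start if the scheduler preempts the first.
 */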
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
pr_err("Logical preemption supported, but not exposed\n");
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
ctx_lo = kernel_context(i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
for_each_engine(engine, i915, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
}
rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
}
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
}
err = 0;
err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static int live_late_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = -ENOMEM;
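/*
 * Both contexts start at default priority, so the second spinner should
 * queue behind the first. Only after boosting its priority (via
 * engine->schedule) do we expect it to preempt the spinning low
 * priority request.
 */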
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_lo = kernel_context(i915);
if (!ctx_lo)
goto err_ctx_hi;
for_each_engine(engine, i915, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin_lo, rq)) {
pr_err("First context failed to start\n");
goto err_wedged;
}
rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
MI_NOOP);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
if (igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("Second context overtook first?\n");
goto err_wedged;
}
attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
engine->schedule(rq, &attr);
if (!igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("High priority context failed to preempt the low priority context\n");
GEM_TRACE_DUMP();
goto err_wedged;
}
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
}
err = 0;
err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
}
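/*
 * A preempt_client pairs a kernel context with its own spinner so that
 * the tests below can juggle several competing submitters.
 */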
struct preempt_client {
struct igt_spinner spin;
struct i915_gem_context *ctx;
};
static int preempt_client_init(struct drm_i915_private *i915,
struct preempt_client *c)
{
c->ctx = kernel_context(i915);
if (!c->ctx)
return -ENOMEM;
if (igt_spinner_init(&c->spin, i915))
goto err_ctx;
return 0;
err_ctx:
kernel_context_close(c->ctx);
return -ENOMEM;
}
static void preempt_client_fini(struct preempt_client *c)
{
igt_spinner_fini(&c->spin);
kernel_context_close(c->ctx);
}
static int live_suppress_self_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
};
struct preempt_client a, b;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
* Verify that if a preemption request does not cause a change in
* the current execution order, the preempt-to-idle injection is
* skipped and that we do not accidentally apply it after the CS
* completion event.
*/
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
if (USES_GUC_SUBMISSION(i915))
return 0; /* presume black box */
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
if (preempt_client_init(i915, &a))
goto err_unlock;
if (preempt_client_init(i915, &b))
goto err_client_a;
for_each_engine(engine, i915, id) {
struct i915_request *rq_a, *rq_b;
int depth;
if (!intel_engine_has_preemption(engine))
continue;
engine->execlists.preempt_hang.count = 0;
rq_a = igt_spinner_create_request(&a.spin,
a.ctx, engine,
MI_NOOP);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
goto err_client_b;
}
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
pr_err("First client failed to start\n");
goto err_wedged;
}
for (depth = 0; depth < 8; depth++) {
rq_b = igt_spinner_create_request(&b.spin,
b.ctx, engine,
MI_NOOP);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
goto err_client_b;
}
i915_request_add(rq_b);
GEM_BUG_ON(i915_request_completed(rq_a));
engine->schedule(rq_a, &attr);
igt_spinner_end(&a.spin);
if (!igt_wait_for_spinner(&b.spin, rq_b)) {
pr_err("Second client failed to start\n");
goto err_wedged;
}
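/* Swap roles so that the client now executing is "a" for the next pass */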
swap(a, b);
rq_a = rq_b;
}
igt_spinner_end(&a.spin);
if (engine->execlists.preempt_hang.count) {
pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
engine->execlists.preempt_hang.count,
depth);
err = -EINVAL;
goto err_client_b;
}
if (igt_flush_test(i915, I915_WAIT_LOCKED))
goto err_wedged;
}
err = 0;
err_client_b:
preempt_client_fini(&b);
err_client_a:
preempt_client_fini(&a);
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
i915_gem_set_wedged(i915);
err = -EIO;
goto err_client_b;
}
static int __i915_sw_fence_call
dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
return NOTIFY_DONE;
}
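/*
 * Build a bare request that is never submitted to hardware and never
 * completes. live_suppress_wait_preempt() installs it as the previous
 * request on a timeline to defeat the NEWCLIENT priority boost.
 */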
static struct i915_request *dummy_request(struct intel_engine_cs *engine)
{
struct i915_request *rq;
rq = kzalloc(sizeof(*rq), GFP_KERNEL);
if (!rq)
return NULL;
INIT_LIST_HEAD(&rq->active_list);
rq->engine = engine;
i915_sched_node_init(&rq->sched);
/* mark this request as permanently incomplete */
rq->fence.seqno = 1;
BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
GEM_BUG_ON(i915_request_completed(rq));
i915_sw_fence_init(&rq->submit, dummy_notify);
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
return rq;
}
static void dummy_request_free(struct i915_request *dummy)
{
/* We have to fake the CS interrupt to kick the next request */
i915_sw_fence_commit(&dummy->submit);
i915_request_mark_complete(dummy);
dma_fence_signal(&dummy->fence);
i915_sched_node_fini(&dummy->sched);
i915_sw_fence_fini(&dummy->submit);
dma_fence_free(&dummy->fence);
}
static int live_suppress_wait_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct preempt_client client[4];
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = -ENOMEM;
int i;
/*
* Waiters are given a little priority nudge, but not enough
* to actually cause any preemption. Double check that we do
* not needlessly generate preempt-to-idle cycles.
*/
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
goto err_unlock;
if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
goto err_client_0;
if (preempt_client_init(i915, &client[2])) /* head of queue */
goto err_client_1;
if (preempt_client_init(i915, &client[3])) /* bystander */
goto err_client_2;
for_each_engine(engine, i915, id) {
int depth;
if (!intel_engine_has_preemption(engine))
continue;
if (!engine->emit_init_breadcrumb)
continue;
for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
struct i915_request *rq[ARRAY_SIZE(client)];
struct i915_request *dummy;
engine->execlists.preempt_hang.count = 0;
dummy = dummy_request(engine);
if (!dummy)
goto err_client_3;
for (i = 0; i < ARRAY_SIZE(client); i++) {
rq[i] = igt_spinner_create_request(&client[i].spin,
client[i].ctx, engine,
MI_NOOP);
if (IS_ERR(rq[i])) {
err = PTR_ERR(rq[i]);
goto err_wedged;
}
/* Disable NEWCLIENT promotion */
__i915_active_request_set(&rq[i]->timeline->last_request,
dummy);
i915_request_add(rq[i]);
}
dummy_request_free(dummy);
GEM_BUG_ON(i915_request_completed(rq[0]));
if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
pr_err("%s: First client failed to start\n",
engine->name);
goto err_wedged;
}
GEM_BUG_ON(!i915_request_started(rq[0]));
if (i915_request_wait(rq[depth],
I915_WAIT_LOCKED |
I915_WAIT_PRIORITY,
1) != -ETIME) {
pr_err("%s: Waiter depth:%d completed!\n",
engine->name, depth);
goto err_wedged;
}
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
goto err_wedged;
if (engine->execlists.preempt_hang.count) {
pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
engine->name,
engine->execlists.preempt_hang.count,
depth);
err = -EINVAL;
goto err_client_3;
}
}
}
err = 0;
err_client_3:
preempt_client_fini(&client[3]);
err_client_2:
preempt_client_fini(&client[2]);
err_client_1:
preempt_client_fini(&client[1]);
err_client_0:
preempt_client_fini(&client[0]);
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
i915_gem_set_wedged(i915);
err = -EIO;
goto err_client_3;
}
static int live_chain_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct preempt_client hi, lo;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
* Build a chain AB...BA between two contexts (A, B) and request
* preemption of the last request. It should then complete before
* the previously submitted spinner in B.
*/
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
if (preempt_client_init(i915, &hi))
goto err_unlock;
if (preempt_client_init(i915, &lo))
goto err_client_hi;
for_each_engine(engine, i915, id) {
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
struct igt_live_test t;
struct i915_request *rq;
int ring_size, count, i;
if (!intel_engine_has_preemption(engine))
continue;
rq = igt_spinner_create_request(&lo.spin,
lo.ctx, engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
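/* Estimate how many requests fit in the ring so we do not overflow it */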
ring_size = rq->wa_tail - rq->head;
if (ring_size < 0)
ring_size += rq->ring->size;
ring_size = rq->ring->size / ring_size;
pr_debug("%s(%s): Using maximum of %d requests\n",
__func__, engine->name, ring_size);
igt_spinner_end(&lo.spin);
if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
pr_err("Timed out waiting to flush %s\n", engine->name);
goto err_wedged;
}
if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
err = -EIO;
goto err_wedged;
}
for_each_prime_number_from(count, 1, ring_size) {
rq = igt_spinner_create_request(&hi.spin,
hi.ctx, engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
if (!igt_wait_for_spinner(&hi.spin, rq))
goto err_wedged;
rq = igt_spinner_create_request(&lo.spin,
lo.ctx, engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
for (i = 0; i < count; i++) {
rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
}
rq = igt_request_alloc(hi.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
engine->schedule(rq, &attr);
igt_spinner_end(&hi.spin);
if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(i915->drm.dev);
pr_err("Failed to preempt over chain of %d\n",
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
goto err_wedged;
}
igt_spinner_end(&lo.spin);
rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(i915->drm.dev);
pr_err("Failed to flush low priority chain of %d requests\n",
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
goto err_wedged;
}
}
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_wedged;
}
}
err = 0;
err_client_lo:
preempt_client_fini(&lo);
err_client_hi:
preempt_client_fini(&hi);
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&hi.spin);
igt_spinner_end(&lo.spin);
i915_gem_set_wedged(i915);
err = -EIO;
goto err_client_lo;
}
static int live_preempt_hang(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = -ENOMEM;
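/*
 * Inject a hang at the preempt-to-idle point while a high priority
 * request is trying to displace a low priority spinner, then check that
 * an engine reset recovers and the preemption finally completes.
 */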
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
if (!intel_has_reset_engine(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
for_each_engine(engine, i915, id) {
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
}
rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
init_completion(&engine->execlists.preempt_hang.completion);
engine->execlists.preempt_hang.inject_hang = true;
i915_request_add(rq);
if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
HZ / 10)) {
pr_err("Preemption did not occur within timeout!");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
}
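/* The injected hang has stalled preemption; reset the engine to recover */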
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
i915_reset_engine(engine, NULL);
clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
engine->execlists.preempt_hang.inject_hang = false;
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
}
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
}
}
err = 0;
err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static int random_range(struct rnd_state *rnd, int min, int max)
{
return i915_prandom_u32_max_state(max - min, rnd) + min;
}
static int random_priority(struct rnd_state *rnd)
{
return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}
struct preempt_smoke {
struct drm_i915_private *i915;
struct i915_gem_context **contexts;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch;
unsigned int ncontext;
struct rnd_state prng;
unsigned long count;
};
static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
&smoke->prng)];
}
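/*
 * Submit a single request from @ctx at priority @prio on smoke->engine,
 * optionally executing the shared batch of MI_ARB_CHECK commands to
 * provide plenty of preemption points.
 */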
static int smoke_submit(struct preempt_smoke *smoke,
struct i915_gem_context *ctx, int prio,
struct drm_i915_gem_object *batch)
{
struct i915_request *rq;
struct i915_vma *vma = NULL;
int err = 0;
if (batch) {
vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
return err;
}
ctx->sched.priority = prio;
rq = igt_request_alloc(ctx, smoke->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin;
}
if (vma) {
err = rq->engine->emit_bb_start(rq,
vma->node.start,
PAGE_SIZE, 0);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
}
i915_request_add(rq);
unpin:
if (vma)
i915_vma_unpin(vma);
return err;
}
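/*
 * Crescendo: each engine gets its own thread which keeps submitting
 * requests from random contexts, stepping the priority up on every
 * submission (modulo I915_PRIORITY_MAX), until the timeout expires.
 */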
static int smoke_crescendo_thread(void *arg)
{
struct preempt_smoke *smoke = arg;
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
do {
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
mutex_lock(&smoke->i915->drm.struct_mutex);
err = smoke_submit(smoke,
ctx, count % I915_PRIORITY_MAX,
smoke->batch);
mutex_unlock(&smoke->i915->drm.struct_mutex);
if (err)
return err;
count++;
} while (!__igt_timeout(end_time, NULL));
smoke->count = count;
return 0;
}
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
struct task_struct *tsk[I915_NUM_ENGINES] = {};
struct preempt_smoke arg[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned long count;
int err = 0;
mutex_unlock(&smoke->i915->drm.struct_mutex);
for_each_engine(engine, smoke->i915, id) {
arg[id] = *smoke;
arg[id].engine = engine;
if (!(flags & BATCH))
arg[id].batch = NULL;
arg[id].count = 0;
tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
"igt/smoke:%d", id);
if (IS_ERR(tsk[id])) {
err = PTR_ERR(tsk[id]);
break;
}
get_task_struct(tsk[id]);
}
count = 0;
for_each_engine(engine, smoke->i915, id) {
int status;
if (IS_ERR_OR_NULL(tsk[id]))
continue;
status = kthread_stop(tsk[id]);
if (status && !err)
err = status;
count += arg[id].count;
put_task_struct(tsk[id]);
}
mutex_lock(&smoke->i915->drm.struct_mutex);
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
return err;
}
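/*
 * Random: a single thread walks all engines in turn, submitting from a
 * random context at a random priority until the timeout expires.
 */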
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
enum intel_engine_id id;
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
do {
for_each_engine(smoke->engine, smoke->i915, id) {
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
err = smoke_submit(smoke,
ctx, random_priority(&smoke->prng),
flags & BATCH ? smoke->batch : NULL);
if (err)
return err;
count++;
}
} while (!__igt_timeout(end_time, NULL));
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
return 0;
}
static int live_preempt_smoke(void *arg)
{
struct preempt_smoke smoke = {
.i915 = arg,
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
.ncontext = 1024,
};
const unsigned int phase[] = { 0, BATCH };
intel_wakeref_t wakeref;
struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
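/*
 * Stress the scheduler by submitting requests from many contexts at
 * varying priorities across all engines, both with per-engine threads
 * ramping priority (crescendo) and with randomised priorities, with and
 * without an MI_ARB_CHECK batch attached.
 */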
if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
return 0;
smoke.contexts = kmalloc_array(smoke.ncontext,
sizeof(*smoke.contexts),
GFP_KERNEL);
if (!smoke.contexts)
return -ENOMEM;
mutex_lock(&smoke.i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(smoke.i915);
smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
err = PTR_ERR(smoke.batch);
goto err_unlock;
}
cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_batch;
}
for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
cs[n] = MI_ARB_CHECK;
cs[n] = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
err = -EIO;
goto err_batch;
}
for (n = 0; n < smoke.ncontext; n++) {
smoke.contexts[n] = kernel_context(smoke.i915);
if (!smoke.contexts[n])
goto err_ctx;
}
for (n = 0; n < ARRAY_SIZE(phase); n++) {
err = smoke_crescendo(&smoke, phase[n]);
if (err)
goto err_ctx;
err = smoke_random(&smoke, phase[n]);
if (err)
goto err_ctx;
}
err_ctx:
if (igt_live_test_end(&t))
err = -EIO;
for (n = 0; n < smoke.ncontext; n++) {
if (!smoke.contexts[n])
break;
kernel_context_close(smoke.contexts[n]);
}
err_batch:
i915_gem_object_put(smoke.batch);
err_unlock:
intel_runtime_pm_put(smoke.i915, wakeref);
mutex_unlock(&smoke.i915->drm.struct_mutex);
kfree(smoke.contexts);
return err;
}
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_smoke),
};
if (!HAS_EXECLISTS(i915))
return 0;
if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
}