mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2025-01-13 10:06:30 +07:00
d19d71fc2b
The request->timeline is only valid until the request is retired (i.e. before it is completed). Upon retiring the request, the context may be unpinned and freed, and along with it the timeline may be freed. We therefore need to be very careful when chasing rq->timeline that the pointer does not disappear beneath us.

The vast majority of users are in a protected context, either during request construction or retirement, where the timeline->mutex is held and the timeline cannot disappear. It is those few off the beaten path (where we access a second timeline) that need extra scrutiny -- to be added in the next patch after first adding the warnings about dangerous access.

One complication, where we cannot use the timeline->mutex itself, is during request submission onto hardware (under spinlocks). Here, we want to check on the timeline to finalize the breadcrumb, and so we need to impose a second rule to ensure that the request->timeline is indeed valid. As we are submitting the request, its context and timeline must be pinned, as they will be used by the hardware. Since it is pinned, we know the request->timeline must still be valid, and we cannot submit the idle barrier until after we release the engine->active.lock; ergo, while submitting and holding that spinlock, a second thread cannot release the timeline.

v2: Don't be lazy inside selftests; hold the timeline->mutex for as long as we need it, and tidy up acquiring the timeline with a bit of refactoring (i915_active_add_request).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190919111912.21631-1-chris@chris-wilson.co.uk
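For illustration only, a minimal sketch (not part of this patch) of the locking rule described above: outside of submission, rq->timeline may only be chased while timeline->mutex is held. The helper name is hypothetical; lockdep_assert_held() is the standard lockdep annotation.

/* Hypothetical helper illustrating the access rule; not from the patch. */
static void __assert_timeline_protected(struct i915_request *rq)
{
	/*
	 * In the protected contexts (request construction and
	 * retirement), timeline->mutex is held, so rq->timeline
	 * cannot be freed beneath us while it is being used.
	 */
	lockdep_assert_held(&rq->timeline->mutex);
}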
229 lines
4.5 KiB
C
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

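/*
 * Test fixture: an i915_active wrapped with a kref and a retired flag,
 * so the subtests can observe the retire callback firing and check that
 * the object is not freed while references remain.
 */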
struct live_active {
	struct i915_active base;
	struct kref ref;
	bool retired;
};

static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}

static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}

static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}

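/*
 * Callbacks wired into i915_active_init(): __live_active runs when the
 * tracker first becomes active and takes a reference; __live_retire runs
 * once the last tracked request is retired, recording the fact and
 * dropping that reference.
 */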
static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;
}

static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}

static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
	struct live_active *active;

	active = kzalloc(sizeof(*active), GFP_KERNEL);
	if (!active)
		return NULL;

	kref_init(&active->ref);
	i915_active_init(i915, &active->base, __live_active, __live_retire);

	return active;
}

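/*
 * Submit one request per engine, all gated on a single heap fence so
 * that none can be submitted to hardware until every request has been
 * tracked by the i915_active; only then is the fence committed.
 */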
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_sw_fence *submit;
	struct live_active *active;
	enum intel_engine_id id;
	unsigned int count = 0;
	int err = 0;

	active = __live_alloc(i915);
	if (!active)
		return ERR_PTR(-ENOMEM);

	submit = heap_fence_create(GFP_KERNEL);
	if (!submit) {
		kfree(active);
		return ERR_PTR(-ENOMEM);
	}

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_add_request(&active->base, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
			break;
		}

		count++;
	}

	i915_active_release(&active->base);
	if (active->retired && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (atomic_read(&active->base.count) != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
		       atomic_read(&active->base.count), count);
		err = -EINVAL;
	}

out:
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);
	if (err) {
		__live_put(active);
		active = ERR_PTR(err);
	}

	return active;
}

static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Check that we get a callback when requests retire upon waiting */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	active = __live_active_setup(i915);
	if (IS_ERR(active)) {
		err = PTR_ERR(active);
		goto err;
	}

	i915_active_wait(&active->base);
	if (!active->retired) {
		pr_err("i915_active not retired after waiting!\n");
		err = -EINVAL;
	}

	__live_put(active);

	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

err:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Check that we get a callback when requests are indirectly retired */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	active = __live_active_setup(i915);
	if (IS_ERR(active)) {
		err = PTR_ERR(active);
		goto err;
	}

	/* waits for & retires all requests */
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	if (!active->retired) {
		pr_err("i915_active not retired after flushing!\n");
		err = -EINVAL;
	}

	__live_put(active);

err:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}
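These live subtests require CONFIG_DRM_I915_SELFTEST and run under the driver's selftest harness, typically invoked by loading the module with the i915.live_selftests module parameter (for example, i915.live_selftests=-1 to run all live selftests); as the early return above shows, they are skipped if the GT is already wedged.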