linux_dsm_epyc7002/drivers/gpu/drm/i915/selftests/i915_gem.c
Chris Wilson 2850748ef8 drm/i915: Pull i915_vma_pin under the vm->mutex
Replace the struct_mutex requirement for pinning the i915_vma with the
local vm->mutex instead. Note that the vm->mutex is tainted by the
shrinker (we require unbinding from inside fs-reclaim) and so we cannot
allocate while holding that mutex. Instead we have to preallocate
workers to do the allocation and apply the PTE updates after we have
reserved their slot in the drm_mm (using fences to order the PTE writes
with the GPU work and with later unbind).
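
To make the shape of that concrete, here is a minimal user-space sketch of
the "allocate before taking the reclaim-tainted lock" pattern. All names
below (pte_update, bind_vma, vm_mutex as a plain pthread mutex) are
illustrative stand-ins, not the driver's own API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for vm->mutex: nothing may allocate while it is held. */
static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER;

struct pte_update {	/* hypothetical preallocated worker state */
	unsigned long offset;
	unsigned long size;
};

static int bind_vma(unsigned long offset, unsigned long size)
{
	struct pte_update *work;

	/* Do all allocation up front, before the lock is taken. */
	work = malloc(sizeof(*work));
	if (!work)
		return -1;

	pthread_mutex_lock(&vm_mutex);
	/* Reserve the slot and hand the preallocated update to a worker. */
	work->offset = offset;
	work->size = size;
	printf("queued PTE update for [%#lx, %#lx)\n", offset, offset + size);
	pthread_mutex_unlock(&vm_mutex);

	free(work);	/* a real worker would run asynchronously and free this */
	return 0;
}

int main(void)
{
	return bind_vma(0x10000, 0x1000);
}

The point of the sketch is only the ordering: the allocation happens with no
locks held, so the lock itself can then be taken safely from the shrinker.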

In adding the asynchronous vma binding, one subtle requirement is to
avoid coupling the binding fence into the backing object->resv. That is,
the asynchronous binding only applies to the vma timeline itself and not
to the pages as that is a more global timeline (the binding of one vma
does not need to be ordered with another vma, nor does the implicit GEM
fencing depend on a vma, only on writes to the backing store). Keeping
the vma binding distinct from the backing store timelines is verified by
a number of async gem_exec_fence and gem_exec_schedule tests. The way we
do this is quite simple, we keep the fence for the vma binding separate
and only wait on it as required, and never add it to the obj->resv
itself.
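
A minimal model of that separation, again with purely illustrative names
(the real driver uses dma_fence and the object's reservation object, neither
of which appears here), keeps the binding completion on the vma itself so
that only paths which actually need the binding ever wait for it:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct bind_fence {		/* hypothetical per-vma binding "fence" */
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool signalled;
};

struct fake_vma {
	struct bind_fence bind;	/* private to the vma timeline */
	/* the backing object's shared reservation is deliberately untouched */
};

static struct fake_vma vma = {
	.bind = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	},
};

static void vma_bind_complete(struct fake_vma *v)
{
	pthread_mutex_lock(&v->bind.lock);
	v->bind.signalled = true;
	pthread_cond_broadcast(&v->bind.cond);
	pthread_mutex_unlock(&v->bind.lock);
}

static void vma_wait_for_bind(struct fake_vma *v)
{
	pthread_mutex_lock(&v->bind.lock);
	while (!v->bind.signalled)
		pthread_cond_wait(&v->bind.cond, &v->bind.lock);
	pthread_mutex_unlock(&v->bind.lock);
}

int main(void)
{
	vma_bind_complete(&vma);	/* the asynchronous binder signals here */
	vma_wait_for_bind(&vma);	/* only binding consumers wait here */
	printf("vma bound\n");
	return 0;
}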

Another consequence of reducing the locking around the vma is that the
destruction of the vma is no longer globally serialised by struct_mutex.
A natural solution would be to add a kref to i915_vma, but that requires
decoupling the reference cycles, possibly by introducing a new
i915_mm_pages object that is owned by both obj->mm and vma->pages.
However, we have not taken that route due to the overshadowing lmem/ttm
discussions, and instead play a series of complicated games with
trylocks to (hopefully) ensure that only one destruction path is called!
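
Reduced to a hypothetical user-space model (the actual i915 paths are far
more involved), the trylock game amounts to two release paths racing for a
lock plus a flag, so that the teardown runs exactly once:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_vma {
	pthread_mutex_t destroy_lock;
	bool destroyed;
};

static void vma_release(struct fake_vma *vma, const char *who)
{
	/* Failing the trylock means another path is already destroying. */
	if (pthread_mutex_trylock(&vma->destroy_lock)) {
		printf("%s: destruction in progress elsewhere, backing off\n", who);
		return;
	}
	if (!vma->destroyed) {
		vma->destroyed = true;
		printf("%s: performing the single teardown\n", who);
	}
	pthread_mutex_unlock(&vma->destroy_lock);
}

int main(void)
{
	struct fake_vma vma = {
		.destroy_lock = PTHREAD_MUTEX_INITIALIZER,
		.destroyed = false,
	};

	vma_release(&vma, "object close");	/* performs the teardown */
	vma_release(&vma, "vm close");		/* finds it already destroyed */
	return 0;
}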

v2: Add some commentary, and some helpers to reduce patch churn.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-4-chris@chris-wilson.co.uk
2019-10-04 15:39:02 +01:00


/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/random.h>

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "mock_drm.h"

static int switch_to_context(struct drm_i915_private *i915,
			     struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		i915_request_add(rq);
	}

	return 0;
}

static void trash_stolen(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	const resource_size_t size = resource_size(&i915->dsm);
	unsigned long page;
	u32 prng = 0x12345678;

	for (page = 0; page < size; page += PAGE_SIZE) {
		const dma_addr_t dma = i915->dsm.start + page;
		u32 __iomem *s;
		int x;

		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
			prng = next_pseudo_random32(prng);
			iowrite32(prng, &s[x]);
		}
		io_mapping_unmap_atomic(s);
	}

	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}

static void simulate_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/*
	 * As a final sting in the tail, invalidate stolen. Under a real S4,
	 * stolen is lost and needs to be refilled on resume. However, under
	 * CI we merely do S4-device testing (as full S4 is too unreliable
	 * for automated testing across a cluster), so to simulate the effect
	 * of stolen being trashed across S4, we trash it ourselves.
	 */
	trash_stolen(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static int pm_prepare(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	return 0;
}

static void pm_suspend(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_gem_suspend_gtt_mappings(i915);
		i915_gem_suspend_late(i915);
	}
}

static void pm_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_gem_suspend_gtt_mappings(i915);
		i915_gem_freeze(i915);
		i915_gem_freeze_late(i915);
	}
}

static void pm_resume(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/*
	 * Both suspend and hibernate follow the same wakeup path and assume
	 * that runtime-pm just works.
	 */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		intel_gt_sanitize(&i915->gt, false);
		i915_gem_sanitize(i915);

		i915_gem_restore_gtt_mappings(i915);
		i915_gem_restore_fences(i915);

		i915_gem_resume(i915);
	}
}

static int igt_gem_suspend(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	mutex_lock(&i915->drm.struct_mutex);
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_suspend(i915);

	/* Here be dragons! Note that with S3RST any S3 may become S4! */
	simulate_hibernate(i915);

	pm_resume(i915);

	mutex_lock(&i915->drm.struct_mutex);
	err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
out:
	mock_file_free(i915, file);
	return err;
}

static int igt_gem_hibernate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct drm_file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	err = -ENOMEM;
	mutex_lock(&i915->drm.struct_mutex);
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err)
		goto out;

	err = pm_prepare(i915);
	if (err)
		goto out;

	pm_hibernate(i915);

	/* Here be dragons! */
	simulate_hibernate(i915);

	pm_resume(i915);

	mutex_lock(&i915->drm.struct_mutex);
	err = switch_to_context(i915, ctx);
	mutex_unlock(&i915->drm.struct_mutex);
out:
	mock_file_free(i915, file);
	return err;
}

int i915_gem_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_suspend),
		SUBTEST(igt_gem_hibernate),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}