/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "intel_gt.h"

#include "../selftests/i915_random.h"
#include "../i915_selftest.h"

#include "../selftests/igt_flush_test.h"
#include "../selftests/mock_gem_device.h"
#include "selftests/mock_timeline.h"

static struct page *hwsp_page(struct intel_timeline *tl)
{
	struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static unsigned long hwsp_cacheline(struct intel_timeline *tl)
{
	unsigned long address = (unsigned long)page_address(hwsp_page(tl));

	return (address + tl->hwsp_offset) / CACHELINE_BYTES;
}

#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)

struct mock_hwsp_freelist {
	struct drm_i915_private *i915;
	struct radix_tree_root cachelines;
	struct intel_timeline **history;
	unsigned long count, max;
	struct rnd_state prng;
};

enum {
	SHUFFLE = BIT(0),
};

static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
			       unsigned int idx,
			       struct intel_timeline *tl)
{
	tl = xchg(&state->history[idx], tl);
	if (tl) {
		radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
		intel_timeline_put(tl);
	}
}

static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
				unsigned int count,
				unsigned int flags)
{
	struct intel_timeline *tl;
	unsigned int idx;

	while (count--) {
		unsigned long cacheline;
		int err;

		tl = intel_timeline_create(&state->i915->gt, NULL);
		if (IS_ERR(tl))
			return PTR_ERR(tl);

		cacheline = hwsp_cacheline(tl);
		err = radix_tree_insert(&state->cachelines, cacheline, tl);
		if (err) {
			if (err == -EEXIST) {
				pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
				       cacheline);
			}
			intel_timeline_put(tl);
			return err;
		}

		idx = state->count++ % state->max;
		__mock_hwsp_record(state, idx, tl);
	}

	if (flags & SHUFFLE)
		i915_prandom_shuffle(state->history,
				     sizeof(*state->history),
				     min(state->count, state->max),
				     &state->prng);

	count = i915_prandom_u32_max_state(min(state->count, state->max),
					   &state->prng);
	while (count--) {
		idx = --state->count % state->max;
		__mock_hwsp_record(state, idx, NULL);
	}

	return 0;
}

static int mock_hwsp_freelist(void *arg)
{
	struct mock_hwsp_freelist state;
	const struct {
		const char *name;
		unsigned int flags;
	} phases[] = {
		{ "linear", 0 },
		{ "shuffled", SHUFFLE },
		{ },
	}, *p;
	unsigned int na;
	int err = 0;

	INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
	state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);

	state.i915 = mock_gem_device();
	if (!state.i915)
		return -ENOMEM;

	/*
	 * Create a bunch of timelines and check that their HWSP do not
	 * overlap. Free some, and try again.
	 */
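	/*
	 * Sizing note (worked arithmetic, not from the original comment):
	 * on a typical 64-bit build with 4KiB pages and 8-byte pointers,
	 * state.max below works out to 512 concurrently tracked timelines,
	 * and CACHELINES_PER_PAGE to 4096 / 64 = 64; the test only relies
	 * on these ratios, not on the exact platform values.
	 */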
	state.max = PAGE_SIZE / sizeof(*state.history);
	state.count = 0;
	state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
	if (!state.history) {
		err = -ENOMEM;
		goto err_put;
	}

	mutex_lock(&state.i915->drm.struct_mutex);
	for (p = phases; p->name; p++) {
		pr_debug("%s(%s)\n", __func__, p->name);
		for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
			err = __mock_hwsp_timeline(&state, na, p->flags);
			if (err)
				goto out;
		}
	}

out:
	for (na = 0; na < state.max; na++)
		__mock_hwsp_record(&state, na, NULL);
	mutex_unlock(&state.i915->drm.struct_mutex);
	kfree(state.history);
err_put:
	drm_dev_put(&state.i915->drm);
	return err;
}

struct __igt_sync {
	const char *name;
	u32 seqno;
	bool expected;
	bool set;
};

static int __igt_sync(struct intel_timeline *tl,
		      u64 ctx,
		      const struct __igt_sync *p,
		      const char *name)
{
	int ret;

	if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
		pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
		       name, p->name, ctx, p->seqno, yesno(p->expected));
		return -EINVAL;
	}

	if (p->set) {
		ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
		if (ret)
			return ret;
	}

	return 0;
}

static int igt_sync(void *arg)
{
	const struct __igt_sync pass[] = {
		{ "unset", 0, false, false },
		{ "new", 0, false, true },
		{ "0a", 0, true, true },
		{ "1a", 1, false, true },
		{ "1b", 1, true, true },
		{ "0b", 0, true, false },
		{ "2a", 2, false, true },
		{ "4", 4, false, true },
		{ "INT_MAX", INT_MAX, false, true },
		{ "INT_MAX-1", INT_MAX-1, true, false },
		{ "INT_MAX+1", (u32)INT_MAX+1, false, true },
		{ "INT_MAX", INT_MAX, true, false },
		{ "UINT_MAX", UINT_MAX, false, true },
		{ "wrap", 0, false, true },
		{ "unwrap", UINT_MAX, true, false },
		{},
	}, *p;
	struct intel_timeline tl;
	int order, offset;
	int ret = -ENODEV;

	mock_timeline_init(&tl, 0);
	for (p = pass; p->name; p++) {
		for (order = 1; order < 64; order++) {
			for (offset = -1; offset <= (order > 1); offset++) {
				u64 ctx = BIT_ULL(order) + offset;

				ret = __igt_sync(&tl, ctx, p, "1");
				if (ret)
					goto out;
			}
		}
	}
	mock_timeline_fini(&tl);

	mock_timeline_init(&tl, 0);
	for (order = 1; order < 64; order++) {
		for (offset = -1; offset <= (order > 1); offset++) {
			u64 ctx = BIT_ULL(order) + offset;

			for (p = pass; p->name; p++) {
				ret = __igt_sync(&tl, ctx, p, "2");
				if (ret)
					goto out;
			}
		}
	}

out:
	mock_timeline_fini(&tl);
	return ret;
}

static unsigned int random_engine(struct rnd_state *rnd)
{
	return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
}

static int bench_sync(void *arg)
{
	struct rnd_state prng;
	struct intel_timeline tl;
	unsigned long end_time, count;
	u64 prng32_1M;
	ktime_t kt;
	int order, last_order;

	mock_timeline_init(&tl, 0);

	/*
	 * Lookups from cache are very fast and so the random number
	 * generation and the loop itself become a significant factor in the
	 * per-iteration timings. We try to compensate the results by
	 * measuring the overhead of the prng and subtracting it from the
	 * reported results.
	 */
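	/*
	 * Worked example of the compensation (illustrative numbers, not
	 * from the original comment): prng32_1M holds the cost of one
	 * 32-bit prng draw in ns, scaled up by 2^20. At ~10ns per draw,
	 * prng32_1M ~= 10 << 20, and the later
	 * "(count * prng32_1M * 2) >> 20" terms subtract two 32-bit draws
	 * per iteration (a u64 costs two u32 draws).
	 */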
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		u32 x;

		/* Make sure the compiler doesn't optimise away the prng call */
		WRITE_ONCE(x, prandom_u32_state(&prng));

		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	pr_debug("%s: %lu random evaluations, %lluns/prng\n",
		 __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
	prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);

	/* Benchmark (only) setting random context ids */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		u64 id = i915_prandom_u64_state(&prng);

		__intel_timeline_sync_set(&tl, id, 0);
		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu random insertions, %lluns/insert\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	/* Benchmark looking up the exact same context ids as we just set */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	end_time = count;
	kt = ktime_get();
	while (end_time--) {
		u64 id = i915_prandom_u64_state(&prng);

		if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
			mock_timeline_fini(&tl);
			pr_err("Lookup of %llu failed\n", id);
			return -EINVAL;
		}
	}
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu random lookups, %lluns/lookup\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	mock_timeline_fini(&tl);
	cond_resched();

	mock_timeline_init(&tl, 0);

	/* Benchmark setting the first N (in order) contexts */
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		__intel_timeline_sync_set(&tl, count++, 0);
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	pr_info("%s: %lu in-order insertions, %lluns/insert\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	/* Benchmark looking up the exact same context ids as we just set */
	end_time = count;
	kt = ktime_get();
	while (end_time--) {
		if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
			pr_err("Lookup of %lu failed\n", end_time);
			mock_timeline_fini(&tl);
			return -EINVAL;
		}
	}
	kt = ktime_sub(ktime_get(), kt);
	pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));

	mock_timeline_fini(&tl);
	cond_resched();

	mock_timeline_init(&tl, 0);

	/* Benchmark searching for a random context id and maybe changing it */
	prandom_seed_state(&prng, i915_selftest.random_seed);
	count = 0;
	kt = ktime_get();
	end_time = jiffies + HZ/10;
	do {
		u32 id = random_engine(&prng);
		u32 seqno = prandom_u32_state(&prng);

		if (!__intel_timeline_sync_is_later(&tl, id, seqno))
			__intel_timeline_sync_set(&tl, id, seqno);

		count++;
	} while (!time_after(jiffies, end_time));
	kt = ktime_sub(ktime_get(), kt);
	kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
	pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
		__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
	mock_timeline_fini(&tl);
	cond_resched();

	/* Benchmark searching for a known context id and changing the seqno */
	for (last_order = 1, order = 1; order < 32;
	     ({ int tmp = last_order; last_order = order; order += tmp; })) {
		unsigned int mask = BIT(order) - 1;

		mock_timeline_init(&tl, 0);

		count = 0;
		kt = ktime_get();
		end_time = jiffies + HZ/10;
		do {
			/*
			 * Without assuming too many details of the
			 * underlying implementation, try to identify its
			 * phase-changes (if any)!
			 */
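			/*
			 * Illustrative expansion (not part of the original
			 * comment): with order == 2, mask == 3, the ids
			 * below cycle 0<<2, 1<<2, 2<<2, 3<<2, 0<<2, ... so
			 * each pass reuses a small, fixed working set whose
			 * size and stride grow with the Fibonacci sequence
			 * of orders (1, 2, 3, 5, 8, ...).
			 */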
			u64 id = (u64)(count & mask) << order;

			__intel_timeline_sync_is_later(&tl, id, 0);
			__intel_timeline_sync_set(&tl, id, 0);

			count++;
		} while (!time_after(jiffies, end_time));
		kt = ktime_sub(ktime_get(), kt);
		pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
			__func__, count, order,
			(long long)div64_ul(ktime_to_ns(kt), count));
		mock_timeline_fini(&tl);
		cond_resched();
	}

	return 0;
}

int intel_timeline_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_hwsp_freelist),
		SUBTEST(igt_sync),
		SUBTEST(bench_sync),
	};

	return i915_subtests(tests, NULL);
}

static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (INTEL_GEN(rq->i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = 0;
		*cs++ = value;
	} else if (INTEL_GEN(rq->i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = 0;
		*cs++ = addr;
		*cs++ = value;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = addr;
		*cs++ = value;
		*cs++ = MI_NOOP;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static struct i915_request *
tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
{
	struct i915_request *rq;
	int err;

	lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */

	err = intel_timeline_pin(tl);
	if (err) {
		rq = ERR_PTR(err);
		goto out;
	}

	rq = i915_request_create(engine->kernel_context);
	if (IS_ERR(rq))
		goto out_unpin;

	err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
	i915_request_add(rq);
	if (err)
		rq = ERR_PTR(err);

out_unpin:
	intel_timeline_unpin(tl);
out:
	if (IS_ERR(rq))
		pr_err("Failed to write to timeline!\n");
	return rq;
}

static struct intel_timeline *
checked_intel_timeline_create(struct drm_i915_private *i915)
{
	struct intel_timeline *tl;

	tl = intel_timeline_create(&i915->gt, NULL);
	if (IS_ERR(tl))
		return tl;

	if (*tl->hwsp_seqno != tl->seqno) {
		pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
		       *tl->hwsp_seqno, tl->seqno);
		intel_timeline_put(tl);
		return ERR_PTR(-EINVAL);
	}

	return tl;
}

static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
	struct drm_i915_private *i915 = arg;
	struct intel_timeline **timelines;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	unsigned long count, n;
	int err = 0;

	/*
	 * Create a bunch of timelines and check we can write
	 * independently to each of their breadcrumb slots.
	 */
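	/*
	 * Sizing note (worked arithmetic, not part of the original comment):
	 * the array below reserves NUM_TIMELINES (4096) slots per engine,
	 * i.e. 4096 * I915_NUM_ENGINES pointers; engines that cannot store
	 * a dword are skipped and simply leave their slots unused.
	 */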
	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
				   sizeof(*timelines),
				   GFP_KERNEL);
	if (!timelines)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	for_each_engine(engine, i915, id) {
		if (!intel_engine_can_store_dword(engine))
			continue;

		for (n = 0; n < NUM_TIMELINES; n++) {
			struct intel_timeline *tl;
			struct i915_request *rq;

			tl = checked_intel_timeline_create(i915);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				goto out;
			}

			rq = tl_write(tl, engine, count);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				goto out;
			}

			timelines[count++] = tl;
		}
	}

out:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (n = 0; n < count; n++) {
		struct intel_timeline *tl = timelines[n];

		if (!err && *tl->hwsp_seqno != n) {
			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
			       n, *tl->hwsp_seqno);
			err = -EINVAL;
		}
		intel_timeline_put(tl);
	}

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	kvfree(timelines);

	return err;
#undef NUM_TIMELINES
}

static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
	struct drm_i915_private *i915 = arg;
	struct intel_timeline **timelines;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	unsigned long count, n;
	int err = 0;

	/*
	 * Create a bunch of timelines and check we can write
	 * independently to each of their breadcrumb slots with adjacent
	 * engines.
	 */

	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
				   sizeof(*timelines),
				   GFP_KERNEL);
	if (!timelines)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	for (n = 0; n < NUM_TIMELINES; n++) {
		for_each_engine(engine, i915, id) {
			struct intel_timeline *tl;
			struct i915_request *rq;

			if (!intel_engine_can_store_dword(engine))
				continue;

			tl = checked_intel_timeline_create(i915);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				goto out;
			}

			rq = tl_write(tl, engine, count);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				goto out;
			}

			timelines[count++] = tl;
		}
	}

out:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (n = 0; n < count; n++) {
		struct intel_timeline *tl = timelines[n];

		if (!err && *tl->hwsp_seqno != n) {
			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
			       n, *tl->hwsp_seqno);
			err = -EINVAL;
		}
		intel_timeline_put(tl);
	}

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	kvfree(timelines);

	return err;
#undef NUM_TIMELINES
}

static int live_hwsp_wrap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct intel_timeline *tl;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = 0;

	/*
	 * Across a seqno wrap, we need to keep the old cacheline alive for
	 * foreign GPU references.
	 */
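	/*
	 * Worked example of the scenario (numbers taken from the
	 * tl->seqno = -4u setup below, and assuming each allocation on a
	 * timeline with an initial breadcrumb advances the seqno by two):
	 * seqno[0] becomes 0xfffffffe, the next allocation wraps to a small
	 * value and moves hwsp_offset to a fresh cacheline, yet the write
	 * of seqno[0] into the old cacheline must still land and remain
	 * readable until it is retired.
	 */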
	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	tl = intel_timeline_create(&i915->gt, NULL);
	if (IS_ERR(tl)) {
		err = PTR_ERR(tl);
		goto out_rpm;
	}
	if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
		goto out_free;

	err = intel_timeline_pin(tl);
	if (err)
		goto out_free;

	for_each_engine(engine, i915, id) {
		const u32 *hwsp_seqno[2];
		struct i915_request *rq;
		u32 seqno[2];

		if (!intel_engine_can_store_dword(engine))
			continue;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out;
		}

		tl->seqno = -4u;

		err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
			 seqno[0], tl->hwsp_offset);

		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		hwsp_seqno[0] = tl->hwsp_seqno;

		err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
			 seqno[1], tl->hwsp_offset);

		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
		hwsp_seqno[1] = tl->hwsp_seqno;

		/* With wrap should come a new hwsp */
		GEM_BUG_ON(seqno[1] >= seqno[0]);
		GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);

		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Wait for timeline writes timed out!\n");
			err = -EIO;
			goto out;
		}

		if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
			pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
			       *hwsp_seqno[0], *hwsp_seqno[1],
			       seqno[0], seqno[1]);
			err = -EINVAL;
			goto out;
		}

		i915_retire_requests(i915); /* recycle HWSP */
	}

out:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	intel_timeline_unpin(tl);
out_free:
	intel_timeline_put(tl);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int live_hwsp_recycle(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	unsigned long count;
	int err = 0;

	/*
	 * Check seqno writes into one timeline at a time. We expect to
	 * recycle the breadcrumb slot between iterations and neither
	 * want to confuse ourselves nor the GPU.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);

		if (!intel_engine_can_store_dword(engine))
			continue;

		do {
			struct intel_timeline *tl;
			struct i915_request *rq;

			tl = checked_intel_timeline_create(i915);
			if (IS_ERR(tl)) {
				err = PTR_ERR(tl);
				goto out;
			}

			rq = tl_write(tl, engine, count);
			if (IS_ERR(rq)) {
				intel_timeline_put(tl);
				err = PTR_ERR(rq);
				goto out;
			}

			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Wait for timeline writes timed out!\n");
				intel_timeline_put(tl);
				err = -EIO;
				goto out;
			}

			if (*tl->hwsp_seqno != count) {
				pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
				       count, *tl->hwsp_seqno);
				err = -EINVAL;
			}

			intel_timeline_put(tl);
			count++;

			if (err)
				goto out;

			intel_timelines_park(i915); /* Encourage recycling! */
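			/*
			 * A note on the park call above (an inference, not
			 * from the original comment): once parked, the just
			 * released HWSP cacheline is expected to return to
			 * the freelist, so the next iteration's
			 * checked_intel_timeline_create() can observe a
			 * cleanly re-initialised breadcrumb slot.
			 */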
		} while (!__igt_timeout(end_time, NULL));
	}

out:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

int intel_timeline_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_hwsp_recycle),
		SUBTEST(live_hwsp_engine),
		SUBTEST(live_hwsp_alternate),
		SUBTEST(live_hwsp_wrap),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}
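/*
 * Usage note (hedged, not part of the original file): with
 * CONFIG_DRM_I915_SELFTEST enabled, the mock subtests above are typically
 * run by loading the module with i915.mock_selftests=-1 and the live
 * subtests with i915.live_selftests=-1, for example via IGT's
 * i915_selftest wrapper.
 */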