linux_dsm_epyc7002/drivers/gpu/drm/i915/selftests/igt_spinner.c
Chris Wilson 8f98d4baf0 drm/i915/selftests: Reorder request allocation vs vma pinning
Impose a restraint that we have all vma pinned for a request prior to
its allocation. This is to simplify request construction, and should
facilitate unravelling the lock interdependencies later.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181204141522.13640-3-chris@chris-wilson.co.uk
2018-12-04 17:53:19 +00:00
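For context, a rough sketch of how a selftest might drive this helper once the ordering above is in place (hypothetical caller name example_spin_on_engine, modelled loosely on the live preemption selftests; assumes struct_mutex is held, uses MI_ARB_CHECK as the arbitration command, and trims error reporting):

static int example_spin_on_engine(struct drm_i915_private *i915,
				  struct i915_gem_context *ctx,
				  struct intel_engine_cs *engine)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, i915);
	if (err)
		return err;

	/* vma pinning happens inside create_request, before the request is allocated */
	rq = igt_spinner_create_request(&spin, ctx, engine, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}

	i915_request_add(rq);
	if (!igt_wait_for_spinner(&spin, rq))
		err = -ETIME;

	igt_spinner_end(&spin);
out:
	igt_spinner_fini(&spin);
	return err;
}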


/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_spinner.h"

int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	/* hws: a page into which the spinning batch reports its seqno */
	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	/* obj: backing storage for the spinning batch itself */
	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	err = i915_vma_move_to_active(vma, rq, flags);
	if (err)
		return err;

	if (!i915_gem_object_has_active_reference(vma->obj)) {
		i915_gem_object_get(vma->obj);
		i915_gem_object_set_active_reference(vma->obj);
	}

	return 0;
}

struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine,
			   u32 arbitration_command)
{
	struct i915_address_space *vm = &ctx->ppgtt->vm;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	vma = i915_vma_instance(spin->obj, vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	/* Pin all vma before allocating the request. */
	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	/* Report the request's seqno into the HWS page... */
	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	/* ...then spin by jumping back to the start of the batch. */
	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	i915_gem_chipset_flush(spin->i915);

	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

cancel_rq:
	if (err) {
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;

	i915_gem_chipset_flush(spin->i915);
}

void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	if (!wait_event_timeout(rq->execute,
				READ_ONCE(rq->global_seqno),
				msecs_to_jiffies(10)))
		return false;

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}