/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
*/

#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"

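/*
 * igt_spinner: a selftest helper that keeps an engine busy with a batch
 * that spins until explicitly ended. The spinner owns two internal
 * objects: a HWS page into which the batch stores the request's seqno
 * (so igt_wait_for_spinner() can tell the batch is really executing) and
 * a page holding the spinning batch itself.
 *
 * Typical usage (a sketch; error handling and request refcounting elided):
 *
 *	igt_spinner_init(&spin, gt);
 *	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
 *	i915_request_add(rq);
 *	if (!igt_wait_for_spinner(&spin, rq))
 *		...
 *	igt_spinner_end(&spin);
 *	igt_spinner_fini(&spin);
 */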
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	unsigned int mode;
	void *vaddr;
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

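	/*
	 * The spinning batch writes seqnos into the HWS page while the CPU
	 * polls it, so keep the page LLC-coherent and map it write-back.
	 */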
	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(gt->i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

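/* Each fence context gets its own u32 seqno slot within the HWS page. */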
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

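/* GPU address of the request's seqno slot within the pinned HWS vma. */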
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

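/*
 * Serialise the request against existing users of the object and mark
 * the vma as busy for the lifetime of the request.
 */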
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

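/*
 * Build a request whose batch stores the request's seqno into the HWS
 * page and then branches back to its own start, spinning until
 * igt_spinner_end() terminates the loop. @arbitration_command is
 * typically MI_ARB_CHECK (allow preemption inside the loop) or MI_NOOP
 * (deny it).
 */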
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, ce->vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

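	/*
	 * First instruction: store the request's seqno into its HWS slot,
	 * using whichever MI_STORE_DWORD_IMM encoding this gen expects.
	 */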
	if (INTEL_GEN(rq->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (INTEL_GEN(rq->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (INTEL_GEN(rq->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

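	/*
	 * Branch back to the start of the batch, so the store and the
	 * arbitration command above keep replaying until the first dword
	 * is rewritten as MI_BATCH_BUFFER_END by igt_spinner_end().
	 */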
	if (INTEL_GEN(rq->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (INTEL_GEN(rq->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	intel_gt_chipset_flush(engine->gt);

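	/* Emit the initial breadcrumb so the request is reported as started. */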
	if (engine->emit_init_breadcrumb &&
	    i915_request_timeline(rq)->has_initial_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

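	/* Older gens require a privileged batch for the MI commands above. */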
	flags = 0;
	if (INTEL_GEN(rq->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

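/* Read back the seqno the spinner has written for this request's context. */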
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

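/*
 * Break the spin loop: rewrite the first batch dword to
 * MI_BATCH_BUFFER_END and flush so the GPU sees it on its next pass.
 */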
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}

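/* End the spinner (if still running) and release both backing objects. */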
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

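/*
 * Wait for the spinner to report the request's seqno in the HWS page,
 * i.e. for the batch to actually start executing on the GPU: first a
 * short busy-wait, then a longer sleeping wait. Returns false if the
 * spinner never starts.
 */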
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}