/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "intel_engine_pm.h"
|
|
|
|
|
2019-04-25 00:48:39 +07:00
|
|
|
#include "i915_selftest.h"
|
|
|
|
#include "selftests/i915_random.h"
|
|
|
|
#include "selftests/igt_flush_test.h"
|
|
|
|
#include "selftests/igt_reset.h"
|
2019-05-23 02:31:56 +07:00
|
|
|
#include "selftests/igt_atomic.h"
|
2017-02-14 00:15:58 +07:00
|
|
|
|
2019-04-25 00:48:39 +07:00
|
|
|
#include "selftests/mock_drm.h"
|
2017-07-21 19:32:34 +07:00
|
|
|
|
2019-05-28 16:29:49 +07:00
|
|
|
#include "gem/selftests/mock_context.h"
|
|
|
|
#include "gem/selftests/igt_gem_utils.h"
|
|
|
|
|
2018-04-11 19:03:46 +07:00
|
|
|
#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
|
|
|
|
|
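/*
 * Fixture shared by the hang tests: a kernel context, a page that stands in
 * for a hardware status page (h->seqno) and a batch object (h->batch) that
 * hang_create_request() fills with a spinning batch.
 */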
struct hang {
	struct intel_gt *gt;
	struct drm_i915_gem_object *hws;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	u32 *seqno;
	u32 *batch;
};

static int hang_init(struct hang *h, struct intel_gt *gt)
{
	void *vaddr;
	int err;

	memset(h, 0, sizeof(*h));
	h->gt = gt;

	h->ctx = kernel_context(gt->i915);
	if (IS_ERR(h->ctx))
		return PTR_ERR(h->ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));

	h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(h->hws)) {
		err = PTR_ERR(h->hws);
		goto err_ctx;
	}

	h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(h->obj)) {
		err = PTR_ERR(h->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	vaddr = i915_gem_object_pin_map(h->obj,
					i915_coherent_map_type(gt->i915));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	h->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(h->hws);
err_obj:
	i915_gem_object_put(h->obj);
err_hws:
	i915_gem_object_put(h->hws);
err_ctx:
	kernel_context_close(h->ctx);
	return err;
}

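/* Each context gets its own u32 slot in the status page, keyed by fence.context. */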
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
}

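/*
 * Order the request against prior users of the object and track the vma as
 * active, keeping it (and its backing object) alive until the request retires.
 */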
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

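/*
 * Build a "hanging" request: the batch writes the request's seqno into the
 * status page (so wait_until_running() can observe it executing) and then
 * branches back to its own start via MI_BATCH_BUFFER_START, spinning forever.
 * The MI_ARB_CHECK instructions leave the spinner preemptible, and the tests
 * stop it by rewriting the first dword to MI_BATCH_BUFFER_END (see hang_fini).
 */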
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
	struct intel_gt *gt = h->gt;
	struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	void *vaddr;
	u32 *batch;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
	if (IS_ERR(vaddr)) {
		i915_gem_object_put(obj);
		return ERR_CAST(vaddr);
	}

	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	h->obj = obj;
	h->batch = vaddr;

	vma = i915_vma_instance(h->obj, vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(h->hws, vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = igt_request_alloc(h->ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = h->batch;
	if (INTEL_GEN(gt->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*batch++ = lower_32_bits(vma->node.start);
		*batch++ = upper_32_bits(vma->node.start);
	} else if (INTEL_GEN(gt->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
		*batch++ = lower_32_bits(vma->node.start);
	} else if (INTEL_GEN(gt->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
		*batch++ = lower_32_bits(vma->node.start);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
		*batch++ = lower_32_bits(vma->node.start);
	}
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
	intel_gt_chipset_flush(engine->gt);

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (INTEL_GEN(gt->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
{
	return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
}

static void hang_fini(struct hang *h)
{
	*h->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(h->gt);

	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	i915_gem_object_unpin_map(h->hws);
	i915_gem_object_put(h->hws);

	kernel_context_close(h->ctx);

	igt_flush_test(h->gt->i915, I915_WAIT_LOCKED);
}

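/*
 * Poll the seqno the spinner writes to the status page to confirm the batch
 * has actually started on the GPU: a short busy-wait first, then up to 1s.
 */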
static bool wait_until_running(struct hang *h, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
					    rq->fence.seqno),
			  1000));
}

static int igt_hang_sanitycheck(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_request *rq;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err;

	/* Basic check that we can execute our hanging batch */

	mutex_lock(&gt->i915->drm.struct_mutex);
	err = hang_init(&h, gt);
	if (err)
		goto unlock;

	for_each_engine(engine, gt->i915, id) {
		struct intel_wedge_me w;
		long timeout;

		if (!intel_engine_can_store_dword(engine))
			continue;

		rq = hang_create_request(&h, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			pr_err("Failed to create request for %s, err=%d\n",
			       engine->name, err);
			goto fini;
		}

		i915_request_get(rq);

		*h.batch = MI_BATCH_BUFFER_END;
		intel_gt_chipset_flush(engine->gt);

		i915_request_add(rq);

		timeout = 0;
		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
			timeout = i915_request_wait(rq, 0,
						    MAX_SCHEDULE_TIMEOUT);
		if (intel_gt_is_wedged(gt))
			timeout = -EIO;

		i915_request_put(rq);

		if (timeout < 0) {
			err = timeout;
			pr_err("Wait for request failed on %s, err=%d\n",
			       engine->name, err);
			goto fini;
		}
	}

fini:
	hang_fini(&h);
unlock:
	mutex_unlock(&gt->i915->drm.struct_mutex);
	return err;
}

|
2018-04-11 19:03:46 +07:00
|
|
|
static bool wait_for_idle(struct intel_engine_cs *engine)
|
|
|
|
{
|
|
|
|
return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
|
|
|
|
}
|
|
|
|
|
static int igt_reset_nop(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	unsigned int reset_count, count;
	enum intel_engine_id id;
	struct drm_file *file;
	IGT_TIMEOUT(end_time);
	int err = 0;

	/* Check that we can reset during non-user portions of requests */

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&gt->i915->drm.struct_mutex);
	ctx = live_context(gt->i915, file);
	mutex_unlock(&gt->i915->drm.struct_mutex);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}

	i915_gem_context_clear_bannable(ctx);
	reset_count = i915_reset_count(global);
	count = 0;
	do {
		mutex_lock(&gt->i915->drm.struct_mutex);

		for_each_engine(engine, gt->i915, id) {
			int i;

			for (i = 0; i < 16; i++) {
				struct i915_request *rq;

				rq = igt_request_alloc(ctx, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					break;
				}

				i915_request_add(rq);
			}
		}

		igt_global_reset_lock(gt);
		intel_gt_reset(gt, ALL_ENGINES, NULL);
		igt_global_reset_unlock(gt);

		mutex_unlock(&gt->i915->drm.struct_mutex);
		if (intel_gt_is_wedged(gt)) {
			err = -EIO;
			break;
		}

		if (i915_reset_count(global) != reset_count + ++count) {
			pr_err("Full GPU reset not recorded!\n");
			err = -EINVAL;
			break;
		}

		err = igt_flush_test(gt->i915, 0);
		if (err)
			break;
	} while (time_before(jiffies, end_time));
	pr_info("%s: %d resets\n", __func__, count);

	mutex_lock(&gt->i915->drm.struct_mutex);
	err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
	mutex_unlock(&gt->i915->drm.struct_mutex);

out:
	mock_file_free(gt->i915, file);
	if (intel_gt_is_wedged(gt))
		err = -EIO;
	return err;
}

static int igt_reset_nop_engine(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct drm_file *file;
	int err = 0;

	/* Check that we can engine-reset during non-user portions */

	if (!intel_has_reset_engine(gt->i915))
		return 0;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&gt->i915->drm.struct_mutex);
	ctx = live_context(gt->i915, file);
	mutex_unlock(&gt->i915->drm.struct_mutex);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}

	i915_gem_context_clear_bannable(ctx);
	for_each_engine(engine, gt->i915, id) {
		unsigned int reset_count, reset_engine_count;
		unsigned int count;
		IGT_TIMEOUT(end_time);

		reset_count = i915_reset_count(global);
		reset_engine_count = i915_reset_engine_count(global, engine);
		count = 0;

		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		do {
			int i;

			if (!wait_for_idle(engine)) {
				pr_err("%s failed to idle before reset\n",
				       engine->name);
				err = -EIO;
				break;
			}

			mutex_lock(&gt->i915->drm.struct_mutex);
			for (i = 0; i < 16; i++) {
				struct i915_request *rq;

				rq = igt_request_alloc(ctx, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					break;
				}

				i915_request_add(rq);
			}
			err = intel_engine_reset(engine, NULL);
			mutex_unlock(&gt->i915->drm.struct_mutex);
			if (err) {
				pr_err("i915_reset_engine failed\n");
				break;
			}

			if (i915_reset_count(global) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				break;
			}

			if (i915_reset_engine_count(global, engine) !=
			    reset_engine_count + ++count) {
				pr_err("%s engine reset not recorded!\n",
				       engine->name);
				err = -EINVAL;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);

		if (err)
			break;

		err = igt_flush_test(gt->i915, 0);
		if (err)
			break;
	}

	mutex_lock(&gt->i915->drm.struct_mutex);
	err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
	mutex_unlock(&gt->i915->drm.struct_mutex);

out:
	mock_file_free(gt->i915, file);
	if (intel_gt_is_wedged(gt))
		err = -EIO;
	return err;
}

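/*
 * Common body for the idle/active engine-reset tests: optionally keep the
 * engine busy with a hanging request, then repeatedly perform an engine
 * reset and check that only the per-engine reset count advances.
 */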
static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err = 0;

	/* Check that we can issue an engine reset on an idle engine (no-op) */

	if (!intel_has_reset_engine(gt->i915))
		return 0;

	if (active) {
		mutex_lock(&gt->i915->drm.struct_mutex);
		err = hang_init(&h, gt);
		mutex_unlock(&gt->i915->drm.struct_mutex);
		if (err)
			return err;
	}

	for_each_engine(engine, gt->i915, id) {
		unsigned int reset_count, reset_engine_count;
		IGT_TIMEOUT(end_time);

		if (active && !intel_engine_can_store_dword(engine))
			continue;

		if (!wait_for_idle(engine)) {
			pr_err("%s failed to idle before reset\n",
			       engine->name);
			err = -EIO;
			break;
		}

		reset_count = i915_reset_count(global);
		reset_engine_count = i915_reset_engine_count(global, engine);

		intel_engine_pm_get(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		do {
			if (active) {
				struct i915_request *rq;

				mutex_lock(&gt->i915->drm.struct_mutex);
				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					mutex_unlock(&gt->i915->drm.struct_mutex);
					break;
				}

				i915_request_get(rq);
				i915_request_add(rq);
				mutex_unlock(&gt->i915->drm.struct_mutex);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

					pr_err("%s: Failed to start request %llx, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					break;
				}

				i915_request_put(rq);
			}

			err = intel_engine_reset(engine, NULL);
			if (err) {
				pr_err("i915_reset_engine failed\n");
				break;
			}

			if (i915_reset_count(global) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				break;
			}

			if (i915_reset_engine_count(global, engine) !=
			    ++reset_engine_count) {
				pr_err("%s engine reset not recorded!\n",
				       engine->name);
				err = -EINVAL;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		intel_engine_pm_put(engine);

		if (err)
			break;

		err = igt_flush_test(gt->i915, 0);
		if (err)
			break;
	}

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	if (active) {
		mutex_lock(&gt->i915->drm.struct_mutex);
		hang_fini(&h);
		mutex_unlock(&gt->i915->drm.struct_mutex);
	}

	return err;
}

static int igt_reset_idle_engine(void *arg)
{
	return __igt_reset_engine(arg, false);
}

static int igt_reset_active_engine(void *arg)
{
	return __igt_reset_engine(arg, true);
}

struct active_engine {
	struct task_struct *task;
	struct intel_engine_cs *engine;
	unsigned long resets;
	unsigned int flags;
};

#define TEST_ACTIVE	BIT(0)
#define TEST_OTHERS	BIT(1)
#define TEST_SELF	BIT(2)
#define TEST_PRIORITY	BIT(3)

static int active_request_put(struct i915_request *rq)
{
	int err = 0;

	if (!rq)
		return 0;

	if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
		GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
			  rq->engine->name,
			  rq->fence.context,
			  rq->fence.seqno);
		GEM_TRACE_DUMP();

		intel_gt_set_wedged(rq->engine->gt);
		err = -EIO;
	}

	i915_request_put(rq);

	return err;
}

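/*
 * Kthread body used by __igt_reset_engines(): keep the target engine busy by
 * continually submitting requests from a small ring of contexts (optionally
 * with randomised priorities) while resets are injected elsewhere.
 */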
static int active_engine(void *data)
{
	I915_RND_STATE(prng);
	struct active_engine *arg = data;
	struct intel_engine_cs *engine = arg->engine;
	struct i915_request *rq[8] = {};
	struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
	struct drm_file *file;
	unsigned long count = 0;
	int err = 0;

	file = mock_file(engine->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	for (count = 0; count < ARRAY_SIZE(ctx); count++) {
		mutex_lock(&engine->i915->drm.struct_mutex);
		ctx[count] = live_context(engine->i915, file);
		mutex_unlock(&engine->i915->drm.struct_mutex);
		if (IS_ERR(ctx[count])) {
			err = PTR_ERR(ctx[count]);
			while (--count)
				i915_gem_context_put(ctx[count]);
			goto err_file;
		}
	}

	while (!kthread_should_stop()) {
		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
		struct i915_request *old = rq[idx];
		struct i915_request *new;

		mutex_lock(&engine->i915->drm.struct_mutex);
		new = igt_request_alloc(ctx[idx], engine);
		if (IS_ERR(new)) {
			mutex_unlock(&engine->i915->drm.struct_mutex);
			err = PTR_ERR(new);
			break;
		}

		if (arg->flags & TEST_PRIORITY)
			ctx[idx]->sched.priority =
				i915_prandom_u32_max_state(512, &prng);

		rq[idx] = i915_request_get(new);
		i915_request_add(new);
		mutex_unlock(&engine->i915->drm.struct_mutex);

		err = active_request_put(old);
		if (err)
			break;

		cond_resched();
	}

	for (count = 0; count < ARRAY_SIZE(rq); count++) {
		int err__ = active_request_put(rq[count]);

		/* Keep the first error */
		if (!err)
			err = err__;
	}

err_file:
	mock_file_free(engine->i915, file);
	return err;
}

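/*
 * Reset one engine in a loop while, depending on the flags, the other engines
 * (and optionally the victim itself) are kept busy by active_engine() threads.
 * Afterwards verify the reported per-engine reset counts, that no global reset
 * occurred and that no innocent engine was reset.
 */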
static int __igt_reset_engines(struct intel_gt *gt,
			       const char *test_name,
			       unsigned int flags)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine, *other;
	enum intel_engine_id id, tmp;
	struct hang h;
	int err = 0;

	/* Check that issuing a reset on one engine does not interfere
	 * with any other engine.
	 */

	if (!intel_has_reset_engine(gt->i915))
		return 0;

	if (flags & TEST_ACTIVE) {
		mutex_lock(&gt->i915->drm.struct_mutex);
		err = hang_init(&h, gt);
		mutex_unlock(&gt->i915->drm.struct_mutex);
		if (err)
			return err;

		if (flags & TEST_PRIORITY)
			h.ctx->sched.priority = 1024;
	}

	for_each_engine(engine, gt->i915, id) {
		struct active_engine threads[I915_NUM_ENGINES] = {};
		unsigned long device = i915_reset_count(global);
		unsigned long count = 0, reported;
		IGT_TIMEOUT(end_time);

		if (flags & TEST_ACTIVE &&
		    !intel_engine_can_store_dword(engine))
			continue;

		if (!wait_for_idle(engine)) {
			pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n",
			       engine->name, test_name);
			err = -EIO;
			break;
		}

		memset(threads, 0, sizeof(threads));
		for_each_engine(other, gt->i915, tmp) {
			struct task_struct *tsk;

			threads[tmp].resets =
				i915_reset_engine_count(global, other);

			if (!(flags & TEST_OTHERS))
				continue;

			if (other == engine && !(flags & TEST_SELF))
				continue;

			threads[tmp].engine = other;
			threads[tmp].flags = flags;

			tsk = kthread_run(active_engine, &threads[tmp],
					  "igt/%s", other->name);
			if (IS_ERR(tsk)) {
				err = PTR_ERR(tsk);
				goto unwind;
			}

			threads[tmp].task = tsk;
			get_task_struct(tsk);
		}

		intel_engine_pm_get(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		do {
			struct i915_request *rq = NULL;

			if (flags & TEST_ACTIVE) {
				mutex_lock(&gt->i915->drm.struct_mutex);
				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					mutex_unlock(&gt->i915->drm.struct_mutex);
					break;
				}

				i915_request_get(rq);
				i915_request_add(rq);
				mutex_unlock(&gt->i915->drm.struct_mutex);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

					pr_err("%s: Failed to start request %llx, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					break;
				}
			}

			err = intel_engine_reset(engine, NULL);
			if (err) {
				pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
				       engine->name, test_name, err);
				break;
			}

			count++;

			if (rq) {
				if (i915_request_wait(rq, 0, HZ / 5) < 0) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);

					pr_err("i915_reset_engine(%s:%s):"
					       " failed to complete request after reset\n",
					       engine->name, test_name);
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);
					i915_request_put(rq);

					GEM_TRACE_DUMP();
					intel_gt_set_wedged(gt);
					err = -EIO;
					break;
				}

				i915_request_put(rq);
			}

			if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
				struct drm_printer p =
					drm_info_printer(gt->i915->drm.dev);

				pr_err("i915_reset_engine(%s:%s):"
				       " failed to idle after reset\n",
				       engine->name, test_name);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);

				err = -EIO;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		intel_engine_pm_put(engine);
		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
			engine->name, test_name, count);

		reported = i915_reset_engine_count(global, engine);
		reported -= threads[engine->id].resets;
		if (reported != count) {
			pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
			       engine->name, test_name, count, reported);
			if (!err)
				err = -EINVAL;
		}

unwind:
		for_each_engine(other, gt->i915, tmp) {
			int ret;

			if (!threads[tmp].task)
				continue;

			ret = kthread_stop(threads[tmp].task);
			if (ret) {
				pr_err("kthread for other engine %s failed, err=%d\n",
				       other->name, ret);
				if (!err)
					err = ret;
			}
			put_task_struct(threads[tmp].task);

			if (other->uabi_class != engine->uabi_class &&
			    threads[tmp].resets !=
			    i915_reset_engine_count(global, other)) {
				pr_err("Innocent engine %s was reset (count=%ld)\n",
				       other->name,
				       i915_reset_engine_count(global, other) -
				       threads[tmp].resets);
				if (!err)
					err = -EINVAL;
			}
		}

		if (device != i915_reset_count(global)) {
			pr_err("Global reset (count=%ld)!\n",
			       i915_reset_count(global) - device);
			if (!err)
				err = -EINVAL;
		}

		if (err)
			break;

		mutex_lock(&gt->i915->drm.struct_mutex);
		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
		mutex_unlock(&gt->i915->drm.struct_mutex);
		if (err)
			break;
	}

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	if (flags & TEST_ACTIVE) {
		mutex_lock(&gt->i915->drm.struct_mutex);
		hang_fini(&h);
		mutex_unlock(&gt->i915->drm.struct_mutex);
	}

	return err;
}

static int igt_reset_engines(void *arg)
{
	static const struct {
		const char *name;
		unsigned int flags;
	} phases[] = {
		{ "idle", 0 },
		{ "active", TEST_ACTIVE },
		{ "others-idle", TEST_OTHERS },
		{ "others-active", TEST_OTHERS | TEST_ACTIVE },
		{
			"others-priority",
			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY
		},
		{
			"self-priority",
			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
		},
		{ }
	};
	struct intel_gt *gt = arg;
	typeof(*phases) *p;
	int err;

	for (p = phases; p->name; p++) {
		if (p->flags & TEST_PRIORITY) {
			if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
				continue;
		}

		err = __igt_reset_engines(arg, p->name, p->flags);
		if (err)
			return err;
	}

	return 0;
}

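/*
 * Stand-in for hangcheck firing: perform the GT reset directly and return the
 * reset count sampled beforehand so callers can check that it was incremented.
 */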
static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
{
	u32 count = i915_reset_count(&gt->i915->gpu_error);

	intel_gt_reset(gt, mask, NULL);

	return count;
}

2018-07-16 20:40:09 +07:00
|
|
|
static int igt_reset_wait(void *arg)
|
2017-02-14 00:15:58 +07:00
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_gt *gt = arg;
|
|
|
|
struct i915_gpu_error *global = >->i915->gpu_error;
|
|
|
|
struct intel_engine_cs *engine = gt->i915->engine[RCS0];
|
2018-02-21 16:56:36 +07:00
|
|
|
struct i915_request *rq;
|
2017-02-14 00:15:58 +07:00
|
|
|
unsigned int reset_count;
|
|
|
|
struct hang h;
|
|
|
|
long timeout;
|
|
|
|
int err;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (!engine || !intel_engine_can_store_dword(engine))
|
2017-08-16 15:52:04 +07:00
|
|
|
return 0;
|
|
|
|
|
2017-02-14 00:15:58 +07:00
|
|
|
/* Check that we detect a stuck waiter and issue a reset */
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
igt_global_reset_lock(gt);
|
2017-02-14 00:15:58 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_lock(>->i915->drm.struct_mutex);
|
|
|
|
err = hang_init(&h, gt);
|
2017-02-14 00:15:58 +07:00
|
|
|
if (err)
|
|
|
|
goto unlock;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
rq = hang_create_request(&h, engine);
|
2017-02-14 00:15:58 +07:00
|
|
|
if (IS_ERR(rq)) {
|
|
|
|
err = PTR_ERR(rq);
|
|
|
|
goto fini;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_get(rq);
|
2018-06-12 17:51:35 +07:00
|
|
|
i915_request_add(rq);
|
2017-02-14 00:15:58 +07:00
|
|
|
|
2018-04-06 17:09:50 +07:00
|
|
|
if (!wait_until_running(&h, rq)) {
|
2019-07-13 02:29:53 +07:00
|
|
|
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
|
2017-10-09 18:02:58 +07:00
|
|
|
|
2018-12-07 19:34:28 +07:00
|
|
|
pr_err("%s: Failed to start request %llx, at %x\n",
|
2017-12-17 20:28:52 +07:00
|
|
|
__func__, rq->fence.seqno, hws_seqno(&h, rq));
|
2017-12-08 08:23:00 +07:00
|
|
|
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
|
2017-09-15 20:09:29 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_gt_set_wedged(gt);
|
2017-09-15 20:09:29 +07:00
|
|
|
|
2017-02-14 00:15:58 +07:00
|
|
|
err = -EIO;
|
|
|
|
goto out_rq;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
reset_count = fake_hangcheck(gt, ALL_ENGINES);
|
2017-02-14 00:15:58 +07:00
|
|
|
|
2019-06-18 14:41:30 +07:00
|
|
|
timeout = i915_request_wait(rq, 0, 10);
|
2017-02-14 00:15:58 +07:00
|
|
|
if (timeout < 0) {
|
2018-02-23 00:24:05 +07:00
|
|
|
pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
|
2017-02-14 00:15:58 +07:00
|
|
|
timeout);
|
|
|
|
err = timeout;
|
|
|
|
goto out_rq;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (i915_reset_count(global) == reset_count) {
|
2017-02-14 00:15:58 +07:00
|
|
|
pr_err("No GPU reset recorded!\n");
|
|
|
|
err = -EINVAL;
|
|
|
|
goto out_rq;
|
|
|
|
}
|
|
|
|
|
|
|
|
out_rq:
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(rq);
|
2017-02-14 00:15:58 +07:00
|
|
|
fini:
|
|
|
|
hang_fini(&h);
|
|
|
|
unlock:
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_unlock(&gt->i915->drm.struct_mutex);
|
|
|
|
igt_global_reset_unlock(gt);
|
2017-02-14 00:15:58 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (intel_gt_is_wedged(gt))
|
2017-02-14 00:15:58 +07:00
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-16 20:40:09 +07:00
|
|
|
struct evict_vma {
|
|
|
|
struct completion completion;
|
|
|
|
struct i915_vma *vma;
|
|
|
|
};
|
|
|
|
|
|
|
|
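/*
 * Kthread body: signal that we have started, then try to evict the
 * vma's node. The eviction has to wait upon the hanging request that
 * keeps the vma busy, so it only completes once the GPU is reset.
 */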
static int evict_vma(void *data)
|
|
|
|
{
|
|
|
|
struct evict_vma *arg = data;
|
|
|
|
struct i915_address_space *vm = arg->vma->vm;
|
|
|
|
struct drm_i915_private *i915 = vm->i915;
|
|
|
|
struct drm_mm_node evict = arg->vma->node;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
complete(&arg->completion);
|
|
|
|
|
|
|
|
mutex_lock(&i915->drm.struct_mutex);
|
|
|
|
err = i915_gem_evict_for_node(vm, &evict, 0);
|
|
|
|
mutex_unlock(&i915->drm.struct_mutex);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-20 02:47:46 +07:00
|
|
|
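/*
 * Kthread counterpart to evict_vma(): instead of evicting the node,
 * dirty the tiling and try to claim a fence register while the vma is
 * still busy with the hanging request.
 */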
static int evict_fence(void *data)
|
|
|
|
{
|
|
|
|
struct evict_vma *arg = data;
|
|
|
|
struct drm_i915_private *i915 = arg->vma->vm->i915;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
complete(&arg->completion);
|
|
|
|
|
|
|
|
mutex_lock(&i915->drm.struct_mutex);
|
|
|
|
|
|
|
|
/* Mark the fence register as dirty to force the mmio update. */
|
|
|
|
err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
|
|
|
|
if (err) {
|
|
|
|
pr_err("Invalid Y-tiling settings; err:%d\n", err);
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
2019-08-22 13:09:12 +07:00
|
|
|
err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
|
|
|
|
if (err) {
|
|
|
|
pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
2018-07-20 02:47:46 +07:00
|
|
|
err = i915_vma_pin_fence(arg->vma);
|
2019-08-22 13:09:12 +07:00
|
|
|
i915_vma_unpin(arg->vma);
|
2018-07-20 02:47:46 +07:00
|
|
|
if (err) {
|
|
|
|
pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
i915_vma_unpin_fence(arg->vma);
|
|
|
|
|
|
|
|
out_unlock:
|
|
|
|
mutex_unlock(&i915->drm.struct_mutex);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
static int __igt_reset_evict_vma(struct intel_gt *gt,
|
2018-07-20 02:47:46 +07:00
|
|
|
struct i915_address_space *vm,
|
|
|
|
int (*fn)(void *),
|
|
|
|
unsigned int flags)
|
2018-07-16 20:40:09 +07:00
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_engine_cs *engine = gt->i915->engine[RCS0];
|
2018-07-16 20:40:09 +07:00
|
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
struct task_struct *tsk = NULL;
|
|
|
|
struct i915_request *rq;
|
|
|
|
struct evict_vma arg;
|
|
|
|
struct hang h;
|
|
|
|
int err;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (!engine || !intel_engine_can_store_dword(engine))
|
2018-07-16 20:40:09 +07:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Check that we can recover an unbind stuck on a hanging request */
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_lock(&gt->i915->drm.struct_mutex);
|
|
|
|
err = hang_init(&h, gt);
|
2018-07-16 20:40:09 +07:00
|
|
|
if (err)
|
|
|
|
goto unlock;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
|
2018-07-16 20:40:09 +07:00
|
|
|
if (IS_ERR(obj)) {
|
|
|
|
err = PTR_ERR(obj);
|
|
|
|
goto fini;
|
|
|
|
}
|
|
|
|
|
2018-07-20 02:47:46 +07:00
|
|
|
if (flags & EXEC_OBJECT_NEEDS_FENCE) {
|
|
|
|
err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
|
|
|
|
if (err) {
|
|
|
|
pr_err("Invalid X-tiling settings; err:%d\n", err);
|
|
|
|
goto out_obj;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-16 20:40:09 +07:00
|
|
|
arg.vma = i915_vma_instance(obj, vm, NULL);
|
|
|
|
if (IS_ERR(arg.vma)) {
|
|
|
|
err = PTR_ERR(arg.vma);
|
|
|
|
goto out_obj;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
rq = hang_create_request(&h, engine);
|
2018-07-16 20:40:09 +07:00
|
|
|
if (IS_ERR(rq)) {
|
|
|
|
err = PTR_ERR(rq);
|
|
|
|
goto out_obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = i915_vma_pin(arg.vma, 0, 0,
|
2018-07-20 02:47:46 +07:00
|
|
|
i915_vma_is_ggtt(arg.vma) ?
|
|
|
|
PIN_GLOBAL | PIN_MAPPABLE :
|
|
|
|
PIN_USER);
|
|
|
|
if (err) {
|
|
|
|
i915_request_add(rq);
|
2018-07-16 20:40:09 +07:00
|
|
|
goto out_obj;
|
2018-07-20 02:47:46 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & EXEC_OBJECT_NEEDS_FENCE) {
|
|
|
|
err = i915_vma_pin_fence(arg.vma);
|
|
|
|
if (err) {
|
|
|
|
pr_err("Unable to pin X-tiled fence; err:%d\n", err);
|
|
|
|
i915_vma_unpin(arg.vma);
|
|
|
|
i915_request_add(rq);
|
|
|
|
goto out_obj;
|
|
|
|
}
|
|
|
|
}
|
2018-07-16 20:40:09 +07:00
|
|
|
|
2019-05-28 16:29:51 +07:00
|
|
|
i915_vma_lock(arg.vma);
|
2019-08-19 18:20:33 +07:00
|
|
|
err = i915_request_await_object(rq, arg.vma->obj,
|
|
|
|
flags & EXEC_OBJECT_WRITE);
|
|
|
|
if (err == 0)
|
|
|
|
err = i915_vma_move_to_active(arg.vma, rq, flags);
|
2019-05-28 16:29:51 +07:00
|
|
|
i915_vma_unlock(arg.vma);
|
2018-07-20 02:47:46 +07:00
|
|
|
|
|
|
|
if (flags & EXEC_OBJECT_NEEDS_FENCE)
|
|
|
|
i915_vma_unpin_fence(arg.vma);
|
2018-07-16 20:40:09 +07:00
|
|
|
i915_vma_unpin(arg.vma);
|
|
|
|
|
|
|
|
i915_request_get(rq);
|
|
|
|
i915_request_add(rq);
|
|
|
|
if (err)
|
|
|
|
goto out_rq;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_unlock(&gt->i915->drm.struct_mutex);
|
2018-07-16 20:40:09 +07:00
|
|
|
|
|
|
|
if (!wait_until_running(&h, rq)) {
|
2019-07-13 02:29:53 +07:00
|
|
|
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
|
2018-07-16 20:40:09 +07:00
|
|
|
|
2018-12-07 19:34:28 +07:00
|
|
|
pr_err("%s: Failed to start request %llx, at %x\n",
|
2018-07-16 20:40:09 +07:00
|
|
|
__func__, rq->fence.seqno, hws_seqno(&h, rq));
|
|
|
|
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_gt_set_wedged(gt);
|
2018-07-16 20:40:09 +07:00
|
|
|
goto out_reset;
|
|
|
|
}
|
|
|
|
|
|
|
|
init_completion(&arg.completion);
|
|
|
|
|
2018-07-20 02:47:46 +07:00
|
|
|
tsk = kthread_run(fn, &arg, "igt/evict_vma");
|
2018-07-16 20:40:09 +07:00
|
|
|
if (IS_ERR(tsk)) {
|
|
|
|
err = PTR_ERR(tsk);
|
|
|
|
tsk = NULL;
|
|
|
|
goto out_reset;
|
|
|
|
}
|
2018-11-20 19:06:01 +07:00
|
|
|
get_task_struct(tsk);
|
2018-07-16 20:40:09 +07:00
|
|
|
|
|
|
|
wait_for_completion(&arg.completion);
|
|
|
|
|
drm/i915: Replace global breadcrumbs with per-context interrupt tracking
A few years ago, see commit 688e6c725816 ("drm/i915: Slaughter the
thundering i915_wait_request herd"), the issue of handling multiple
clients waiting in parallel was brought to our attention. The
requirement was that every client should be woken immediately upon its
request being signaled, without incurring any cpu overhead.
Handling certain fragility of our hw meant that we could not do a
simple check inside the irq handler (some generations required almost
unbounded delays before we could be sure of seqno coherency) and so
request completion checking required delegation.
Before commit 688e6c725816, the solution was simple. Every client
waiting on a request would be woken on every interrupt and each would do
a heavyweight check to see if their request was complete. Commit
688e6c725816 introduced an rbtree so that only the earliest waiter on
the global timeline would be woken, and would wake the next and so on.
(Along with various complications to handle requests being reordered
along the global timeline, and also a requirement for kthread to provide
a delegate for fence signaling that had no process context.)
The global rbtree depends on knowing the execution timeline (and global
seqno). Without knowing that order, we must instead check all contexts
queued to the HW to see which may have advanced. We trim that list by
only checking queued contexts that are being waited on, but still we
keep a list of all active contexts and their active signalers that we
inspect from inside the irq handler. By moving the waiters onto the fence
signal list, we can combine the client wakeup with the dma_fence
signaling (a dramatic reduction in complexity, but it does require the HW
to be coherent: the seqno must be visible from the cpu before the
interrupt is raised - we keep a timer backup just in case).
Having previously fixed all the issues with irq-seqno serialisation (by
inserting delays onto the GPU after each request instead of random delays
on the CPU after each interrupt), we can rely on the seqno state to
perform direct wakeups from the interrupt handler. This allows us to
preserve our single context switch behaviour of the current routine,
with the only downside that we lose the RT priority sorting of wakeups.
In general, direct wakeup latency of multiple clients is about the same
(about 10% better in most cases) with a reduction in total CPU time spent
in the waiter (about 20-50% depending on gen). Average herd behaviour is
improved, but at the cost of not delegating wakeups on task_prio.
v2: Capture fence signaling state for error state and add comments to
warm even the most cold of hearts.
v3: Check if the request is still active before busywaiting
v4: Reduce the amount of pointer misdirection with list_for_each_safe
and using a local i915_request variable inside the loops
v5: Add a missing pluralisation to a purely informative selftest message.
References: 688e6c725816 ("drm/i915: Slaughter the thundering i915_wait_request herd")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190129205230.19056-2-chris@chris-wilson.co.uk
2019-01-30 03:52:29 +07:00
|
|
|
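/*
 * The kthread should now be blocked waiting upon the hanging request;
 * that wait installs a callback on the request's fence, so an empty
 * cb_list means the kthread never actually started to wait.
 */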
if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
|
2019-07-13 02:29:53 +07:00
|
|
|
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
|
2018-07-16 20:40:09 +07:00
|
|
|
|
|
|
|
pr_err("igt/evict_vma kthread did not wait\n");
|
|
|
|
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_gt_set_wedged(gt);
|
2018-07-16 20:40:09 +07:00
|
|
|
goto out_reset;
|
|
|
|
}
|
|
|
|
|
|
|
|
out_reset:
|
2019-07-13 02:29:53 +07:00
|
|
|
igt_global_reset_lock(gt);
|
|
|
|
fake_hangcheck(gt, rq->engine->mask);
|
|
|
|
igt_global_reset_unlock(gt);
|
2018-07-16 20:40:09 +07:00
|
|
|
|
|
|
|
if (tsk) {
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_wedge_me w;
|
2018-07-16 20:40:09 +07:00
|
|
|
|
|
|
|
/* The reset, even indirectly, should take less than 10ms. */
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
|
2018-07-16 20:40:09 +07:00
|
|
|
err = kthread_stop(tsk);
|
2018-11-20 19:06:01 +07:00
|
|
|
|
|
|
|
put_task_struct(tsk);
|
2018-07-16 20:40:09 +07:00
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_lock(&gt->i915->drm.struct_mutex);
|
2018-07-16 20:40:09 +07:00
|
|
|
out_rq:
|
|
|
|
i915_request_put(rq);
|
|
|
|
out_obj:
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
fini:
|
|
|
|
hang_fini(&h);
|
|
|
|
unlock:
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_unlock(&gt->i915->drm.struct_mutex);
|
2018-07-16 20:40:09 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (intel_gt_is_wedged(gt))
|
2018-07-16 20:40:09 +07:00
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int igt_reset_evict_ggtt(void *arg)
|
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_gt *gt = arg;
|
2018-07-16 20:40:09 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
|
2018-07-20 02:47:46 +07:00
|
|
|
evict_vma, EXEC_OBJECT_WRITE);
|
2018-07-16 20:40:09 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int igt_reset_evict_ppgtt(void *arg)
|
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_gt *gt = arg;
|
2018-07-16 20:40:09 +07:00
|
|
|
struct i915_gem_context *ctx;
|
2018-07-20 02:47:45 +07:00
|
|
|
struct drm_file *file;
|
2018-07-16 20:40:09 +07:00
|
|
|
int err;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
file = mock_file(gt->i915);
|
2018-07-20 02:47:45 +07:00
|
|
|
if (IS_ERR(file))
|
|
|
|
return PTR_ERR(file);
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_lock(&gt->i915->drm.struct_mutex);
|
|
|
|
ctx = live_context(gt->i915, file);
|
|
|
|
mutex_unlock(&gt->i915->drm.struct_mutex);
|
2018-07-20 02:47:45 +07:00
|
|
|
if (IS_ERR(ctx)) {
|
|
|
|
err = PTR_ERR(ctx);
|
|
|
|
goto out;
|
|
|
|
}
|
2018-07-16 20:40:09 +07:00
|
|
|
|
|
|
|
err = 0;
|
2019-06-11 16:12:37 +07:00
|
|
|
if (ctx->vm) /* aliasing == global gtt locking, covered above */
|
2019-07-13 02:29:53 +07:00
|
|
|
err = __igt_reset_evict_vma(gt, ctx->vm,
|
2018-07-20 02:47:46 +07:00
|
|
|
evict_vma, EXEC_OBJECT_WRITE);
|
2018-07-16 20:40:09 +07:00
|
|
|
|
2018-07-20 02:47:45 +07:00
|
|
|
out:
|
2019-07-13 02:29:53 +07:00
|
|
|
mock_file_free(gt->i915, file);
|
2018-07-16 20:40:09 +07:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-07-20 02:47:46 +07:00
|
|
|
static int igt_reset_evict_fence(void *arg)
|
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_gt *gt = arg;
|
2018-07-20 02:47:46 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
|
2018-07-20 02:47:46 +07:00
|
|
|
evict_fence, EXEC_OBJECT_NEEDS_FENCE);
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
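/*
 * Wait for every engine other than @exclude to idle; see the XXX note
 * in igt_reset_queue() for why back-to-back device resets need this.
 */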
static int wait_for_others(struct intel_gt *gt,
|
2018-03-30 20:18:01 +07:00
|
|
|
struct intel_engine_cs *exclude)
|
|
|
|
{
|
|
|
|
struct intel_engine_cs *engine;
|
|
|
|
enum intel_engine_id id;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
for_each_engine(engine, gt->i915, id) {
|
2018-03-30 20:18:01 +07:00
|
|
|
if (engine == exclude)
|
|
|
|
continue;
|
|
|
|
|
2018-04-11 19:03:46 +07:00
|
|
|
if (!wait_for_idle(engine))
|
2018-03-30 20:18:01 +07:00
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-14 00:15:58 +07:00
|
|
|
static int igt_reset_queue(void *arg)
|
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_gt *gt = arg;
|
|
|
|
struct i915_gpu_error *global = &gt->i915->gpu_error;
|
2017-02-14 00:15:58 +07:00
|
|
|
struct intel_engine_cs *engine;
|
|
|
|
enum intel_engine_id id;
|
|
|
|
struct hang h;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/* Check that we replay pending requests following a hang */
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
igt_global_reset_lock(gt);
|
2017-07-21 19:32:35 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_lock(&gt->i915->drm.struct_mutex);
|
|
|
|
err = hang_init(&h, gt);
|
2017-02-14 00:15:58 +07:00
|
|
|
if (err)
|
|
|
|
goto unlock;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
for_each_engine(engine, gt->i915, id) {
|
2018-02-21 16:56:36 +07:00
|
|
|
struct i915_request *prev;
|
2017-02-14 00:15:58 +07:00
|
|
|
IGT_TIMEOUT(end_time);
|
|
|
|
unsigned int count;
|
|
|
|
|
2017-08-16 15:52:04 +07:00
|
|
|
if (!intel_engine_can_store_dword(engine))
|
|
|
|
continue;
|
|
|
|
|
2018-02-05 22:24:29 +07:00
|
|
|
prev = hang_create_request(&h, engine);
|
2017-02-14 00:15:58 +07:00
|
|
|
if (IS_ERR(prev)) {
|
|
|
|
err = PTR_ERR(prev);
|
|
|
|
goto fini;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_get(prev);
|
2018-06-12 17:51:35 +07:00
|
|
|
i915_request_add(prev);
|
2017-02-14 00:15:58 +07:00
|
|
|
|
|
|
|
count = 0;
|
|
|
|
do {
|
2018-02-21 16:56:36 +07:00
|
|
|
struct i915_request *rq;
|
2017-02-14 00:15:58 +07:00
|
|
|
unsigned int reset_count;
|
|
|
|
|
2018-02-05 22:24:29 +07:00
|
|
|
rq = hang_create_request(&h, engine);
|
2017-02-14 00:15:58 +07:00
|
|
|
if (IS_ERR(rq)) {
|
|
|
|
err = PTR_ERR(rq);
|
|
|
|
goto fini;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_get(rq);
|
2018-06-12 17:51:35 +07:00
|
|
|
i915_request_add(rq);
|
2017-02-14 00:15:58 +07:00
|
|
|
|
2018-03-30 20:18:01 +07:00
|
|
|
/*
|
|
|
|
* XXX We don't handle resetting the kernel context
|
|
|
|
* very well. If we trigger a device reset twice in
|
|
|
|
* quick succession while the kernel context is
|
|
|
|
* executing, we may end up skipping the breadcrumb.
|
|
|
|
* This is really only a problem for the selftest as
|
|
|
|
* normally there is a large interlude between resets
|
|
|
|
* (hangcheck), or we focus on resetting just one
|
|
|
|
* engine and so avoid repeatedly resetting innocents.
|
|
|
|
*/
|
2019-07-13 02:29:53 +07:00
|
|
|
err = wait_for_others(gt, engine);
|
2018-03-30 20:18:01 +07:00
|
|
|
if (err) {
|
|
|
|
pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
|
|
|
|
__func__, engine->name);
|
|
|
|
i915_request_put(rq);
|
|
|
|
i915_request_put(prev);
|
|
|
|
|
|
|
|
GEM_TRACE_DUMP();
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_gt_set_wedged(gt);
|
2018-03-30 20:18:01 +07:00
|
|
|
goto fini;
|
|
|
|
}
|
|
|
|
|
2018-04-06 17:09:50 +07:00
|
|
|
if (!wait_until_running(&h, prev)) {
|
2019-07-13 02:29:53 +07:00
|
|
|
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
|
2017-10-09 18:02:58 +07:00
|
|
|
|
2018-12-07 19:34:28 +07:00
|
|
|
pr_err("%s(%s): Failed to start request %llx, at %x\n",
|
2018-03-30 20:18:01 +07:00
|
|
|
__func__, engine->name,
|
|
|
|
prev->fence.seqno, hws_seqno(&h, prev));
|
|
|
|
intel_engine_dump(engine, &p,
|
|
|
|
"%s\n", engine->name);
|
2017-10-09 18:02:58 +07:00
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(rq);
|
|
|
|
i915_request_put(prev);
|
2017-09-15 20:09:29 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_gt_set_wedged(gt);
|
2017-09-15 20:09:29 +07:00
|
|
|
|
2017-02-14 00:15:58 +07:00
|
|
|
err = -EIO;
|
|
|
|
goto fini;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
reset_count = fake_hangcheck(gt, BIT(id));
|
2017-03-17 00:13:02 +07:00
|
|
|
|
2017-02-14 00:15:58 +07:00
|
|
|
if (prev->fence.error != -EIO) {
|
|
|
|
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
|
|
|
|
prev->fence.error);
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(rq);
|
|
|
|
i915_request_put(prev);
|
2017-02-14 00:15:58 +07:00
|
|
|
err = -EINVAL;
|
|
|
|
goto fini;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rq->fence.error) {
|
|
|
|
pr_err("Fence error status not zero [%d] after unrelated reset\n",
|
|
|
|
rq->fence.error);
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(rq);
|
|
|
|
i915_request_put(prev);
|
2017-02-14 00:15:58 +07:00
|
|
|
err = -EINVAL;
|
|
|
|
goto fini;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (i915_reset_count(global) == reset_count) {
|
2017-02-14 00:15:58 +07:00
|
|
|
pr_err("No GPU reset recorded!\n");
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(rq);
|
|
|
|
i915_request_put(prev);
|
2017-02-14 00:15:58 +07:00
|
|
|
err = -EINVAL;
|
|
|
|
goto fini;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(prev);
|
2017-02-14 00:15:58 +07:00
|
|
|
prev = rq;
|
|
|
|
count++;
|
|
|
|
} while (time_before(jiffies, end_time));
|
|
|
|
pr_info("%s: Completed %d resets\n", engine->name, count);
|
|
|
|
|
|
|
|
*h.batch = MI_BATCH_BUFFER_END;
|
2019-06-21 14:08:02 +07:00
|
|
|
intel_gt_chipset_flush(engine->gt);
|
2017-02-14 00:15:58 +07:00
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(prev);
|
2018-02-05 22:24:28 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
|
2018-02-05 22:24:28 +07:00
|
|
|
if (err)
|
|
|
|
break;
|
2017-02-14 00:15:58 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
fini:
|
|
|
|
hang_fini(&h);
|
|
|
|
unlock:
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_unlock(&gt->i915->drm.struct_mutex);
|
|
|
|
igt_global_reset_unlock(gt);
|
2017-02-14 00:15:58 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (intel_gt_is_wedged(gt))
|
2017-02-14 00:15:58 +07:00
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-07-28 18:21:10 +07:00
|
|
|
static int igt_handle_error(void *arg)
|
2017-06-20 16:57:50 +07:00
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_gt *gt = arg;
|
|
|
|
struct i915_gpu_error *global = &gt->i915->gpu_error;
|
|
|
|
struct intel_engine_cs *engine = gt->i915->engine[RCS0];
|
2017-06-20 16:57:50 +07:00
|
|
|
struct hang h;
|
2018-02-21 16:56:36 +07:00
|
|
|
struct i915_request *rq;
|
2017-07-28 18:21:10 +07:00
|
|
|
struct i915_gpu_state *error;
|
|
|
|
int err;
|
2017-06-20 16:57:50 +07:00
|
|
|
|
|
|
|
/* Check that we can issue a global GPU and engine reset */
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (!intel_has_reset_engine(gt->i915))
|
2017-06-20 16:57:50 +07:00
|
|
|
return 0;
|
|
|
|
|
2018-04-07 05:03:54 +07:00
|
|
|
if (!engine || !intel_engine_can_store_dword(engine))
|
2017-08-16 15:52:04 +07:00
|
|
|
return 0;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_lock(&gt->i915->drm.struct_mutex);
|
2017-06-20 16:57:50 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
err = hang_init(&h, gt);
|
2017-06-20 16:57:50 +07:00
|
|
|
if (err)
|
2017-06-23 20:19:07 +07:00
|
|
|
goto err_unlock;
|
2017-06-20 16:57:50 +07:00
|
|
|
|
2018-02-05 22:24:29 +07:00
|
|
|
rq = hang_create_request(&h, engine);
|
2017-06-20 16:57:50 +07:00
|
|
|
if (IS_ERR(rq)) {
|
|
|
|
err = PTR_ERR(rq);
|
2017-06-23 20:19:07 +07:00
|
|
|
goto err_fini;
|
2017-06-20 16:57:50 +07:00
|
|
|
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_get(rq);
|
2018-06-12 17:51:35 +07:00
|
|
|
i915_request_add(rq);
|
2017-06-20 16:57:50 +07:00
|
|
|
|
2018-04-06 17:09:50 +07:00
|
|
|
if (!wait_until_running(&h, rq)) {
|
2019-07-13 02:29:53 +07:00
|
|
|
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
|
2017-10-09 18:02:58 +07:00
|
|
|
|
2018-12-07 19:34:28 +07:00
|
|
|
pr_err("%s: Failed to start request %llx, at %x\n",
|
2017-12-17 20:28:52 +07:00
|
|
|
__func__, rq->fence.seqno, hws_seqno(&h, rq));
|
2017-12-08 08:23:00 +07:00
|
|
|
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
|
2017-09-15 20:09:29 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_gt_set_wedged(gt);
|
2017-09-15 20:09:29 +07:00
|
|
|
|
2017-06-20 16:57:50 +07:00
|
|
|
err = -EIO;
|
2017-06-23 20:19:07 +07:00
|
|
|
goto err_request;
|
2017-06-20 16:57:50 +07:00
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_unlock(&gt->i915->drm.struct_mutex);
|
2017-06-20 16:57:50 +07:00
|
|
|
|
2017-07-28 18:21:10 +07:00
|
|
|
/* Temporarily disable error capture */
|
2019-07-13 02:29:53 +07:00
|
|
|
error = xchg(&global->first_error, (void *)-1);
|
2017-06-20 16:57:50 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_gt_handle_error(gt, engine->mask, 0, NULL);
|
2017-06-20 16:57:50 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
xchg(&global->first_error, error);
|
2017-06-20 16:57:50 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_lock(&gt->i915->drm.struct_mutex);
|
2017-06-20 16:57:50 +07:00
|
|
|
|
2017-07-28 18:21:10 +07:00
|
|
|
if (rq->fence.error != -EIO) {
|
|
|
|
pr_err("Guilty request not identified!\n");
|
|
|
|
err = -EINVAL;
|
|
|
|
goto err_request;
|
|
|
|
}
|
2017-06-23 20:19:07 +07:00
|
|
|
|
|
|
|
err_request:
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(rq);
|
2017-06-23 20:19:07 +07:00
|
|
|
err_fini:
|
|
|
|
hang_fini(&h);
|
|
|
|
err_unlock:
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_unlock(&gt->i915->drm.struct_mutex);
|
2017-07-28 18:21:10 +07:00
|
|
|
return err;
|
2017-06-20 16:57:50 +07:00
|
|
|
}
|
|
|
|
|
2018-12-13 16:15:20 +07:00
|
|
|
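/*
 * Issue an engine reset with the execlists tasklet disabled and from
 * inside the atomic section provided by @p, checking that the reset
 * path is usable from such contexts.
 */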
static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
|
2019-05-23 02:31:56 +07:00
|
|
|
const struct igt_atomic_section *p,
|
2018-12-13 16:15:20 +07:00
|
|
|
const char *mode)
|
|
|
|
{
|
|
|
|
struct tasklet_struct * const t = &engine->execlists.tasklet;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
|
|
|
|
engine->name, mode, p->name);
|
|
|
|
|
|
|
|
tasklet_disable_nosync(t);
|
|
|
|
p->critical_section_begin();
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
err = intel_engine_reset(engine, NULL);
|
2018-12-13 16:15:20 +07:00
|
|
|
|
|
|
|
p->critical_section_end();
|
|
|
|
tasklet_enable(t);
|
|
|
|
|
|
|
|
if (err)
|
|
|
|
pr_err("i915_reset_engine(%s:%s) failed under %s\n",
|
|
|
|
engine->name, mode, p->name);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
|
2019-05-23 02:31:56 +07:00
|
|
|
const struct igt_atomic_section *p)
|
2018-12-13 16:15:20 +07:00
|
|
|
{
|
|
|
|
struct i915_request *rq;
|
|
|
|
struct hang h;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = __igt_atomic_reset_engine(engine, p, "idle");
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
err = hang_init(&h, engine->gt);
|
2018-12-13 16:15:20 +07:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
rq = hang_create_request(&h, engine);
|
|
|
|
if (IS_ERR(rq)) {
|
|
|
|
err = PTR_ERR(rq);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
i915_request_get(rq);
|
|
|
|
i915_request_add(rq);
|
|
|
|
|
|
|
|
if (wait_until_running(&h, rq)) {
|
|
|
|
err = __igt_atomic_reset_engine(engine, p, "active");
|
|
|
|
} else {
|
|
|
|
pr_err("%s(%s): Failed to start request %llx, at %x\n",
|
|
|
|
__func__, engine->name,
|
|
|
|
rq->fence.seqno, hws_seqno(&h, rq));
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_gt_set_wedged(engine->gt);
|
2018-12-13 16:15:20 +07:00
|
|
|
err = -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err == 0) {
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_wedge_me w;
|
2018-12-13 16:15:20 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
|
2019-06-18 14:41:30 +07:00
|
|
|
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
|
2019-07-13 02:29:53 +07:00
|
|
|
if (intel_gt_is_wedged(engine->gt))
|
2018-12-13 16:15:20 +07:00
|
|
|
err = -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
i915_request_put(rq);
|
|
|
|
out:
|
|
|
|
hang_fini(&h);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2019-05-23 02:31:56 +07:00
|
|
|
static int igt_reset_engines_atomic(void *arg)
|
2018-12-13 16:15:20 +07:00
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_gt *gt = arg;
|
2019-05-23 02:31:56 +07:00
|
|
|
const typeof(*igt_atomic_phases) *p;
|
2018-12-13 16:15:20 +07:00
|
|
|
int err = 0;
|
|
|
|
|
2019-05-23 02:31:56 +07:00
|
|
|
/* Check that the engines resets are usable from atomic context */
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (!intel_has_reset_engine(gt->i915))
|
2019-05-23 02:31:56 +07:00
|
|
|
return 0;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (USES_GUC_SUBMISSION(gt->i915))
|
2019-05-23 02:31:56 +07:00
|
|
|
return 0;
|
2018-12-13 16:15:20 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
igt_global_reset_lock(gt);
|
|
|
|
mutex_lock(&gt->i915->drm.struct_mutex);
|
2018-12-13 16:15:20 +07:00
|
|
|
|
|
|
|
/* Flush any requests before we get started and check basics */
|
2019-07-13 02:29:53 +07:00
|
|
|
if (!igt_force_reset(gt))
|
2018-12-13 16:15:20 +07:00
|
|
|
goto unlock;
|
|
|
|
|
2019-05-23 02:31:56 +07:00
|
|
|
for (p = igt_atomic_phases; p->name; p++) {
|
2018-12-13 16:15:20 +07:00
|
|
|
struct intel_engine_cs *engine;
|
|
|
|
enum intel_engine_id id;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
for_each_engine(engine, gt->i915, id) {
|
2019-05-23 02:31:56 +07:00
|
|
|
err = igt_atomic_reset_engine(engine, p);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
2018-12-13 16:15:20 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
/* As we poke around the guts, do a full reset before continuing. */
|
2019-07-13 02:29:53 +07:00
|
|
|
igt_force_reset(gt);
|
2018-12-13 16:15:20 +07:00
|
|
|
|
|
|
|
unlock:
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_unlock(&gt->i915->drm.struct_mutex);
|
|
|
|
igt_global_reset_unlock(gt);
|
2018-12-13 16:15:20 +07:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-02-14 00:15:58 +07:00
|
|
|
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
|
|
|
|
{
|
|
|
|
static const struct i915_subtest tests[] = {
|
|
|
|
SUBTEST(igt_hang_sanitycheck),
|
2019-02-26 16:49:22 +07:00
|
|
|
SUBTEST(igt_reset_nop),
|
|
|
|
SUBTEST(igt_reset_nop_engine),
|
2017-12-17 20:28:52 +07:00
|
|
|
SUBTEST(igt_reset_idle_engine),
|
|
|
|
SUBTEST(igt_reset_active_engine),
|
2018-03-22 14:35:31 +07:00
|
|
|
SUBTEST(igt_reset_engines),
|
2019-05-23 02:31:56 +07:00
|
|
|
SUBTEST(igt_reset_engines_atomic),
|
2017-02-14 00:15:58 +07:00
|
|
|
SUBTEST(igt_reset_queue),
|
2018-07-16 20:40:09 +07:00
|
|
|
SUBTEST(igt_reset_wait),
|
|
|
|
SUBTEST(igt_reset_evict_ggtt),
|
|
|
|
SUBTEST(igt_reset_evict_ppgtt),
|
2018-07-20 02:47:46 +07:00
|
|
|
SUBTEST(igt_reset_evict_fence),
|
2017-07-28 18:21:10 +07:00
|
|
|
SUBTEST(igt_handle_error),
|
2017-02-14 00:15:58 +07:00
|
|
|
};
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_gt *gt = &i915->gt;
|
2019-01-14 21:21:22 +07:00
|
|
|
intel_wakeref_t wakeref;
|
2017-12-17 20:28:52 +07:00
|
|
|
bool saved_hangcheck;
|
2017-10-09 18:03:00 +07:00
|
|
|
int err;
|
2017-02-14 00:15:58 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (!intel_has_gpu_reset(gt->i915))
|
2017-02-14 00:15:58 +07:00
|
|
|
return 0;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
if (intel_gt_is_wedged(gt))
|
2018-07-05 22:02:14 +07:00
|
|
|
return -EIO; /* we're long past hope of a successful reset */
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
|
2017-12-17 20:28:52 +07:00
|
|
|
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
|
2019-07-13 02:29:53 +07:00
|
|
|
drain_delayed_work(&gt->hangcheck.work); /* flush param */
|
2017-10-09 18:03:00 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
err = intel_gt_live_subtests(tests, gt);
|
2017-10-09 18:03:00 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_lock(&gt->i915->drm.struct_mutex);
|
|
|
|
igt_flush_test(gt->i915, I915_WAIT_LOCKED);
|
|
|
|
mutex_unlock(&gt->i915->drm.struct_mutex);
|
2018-03-22 14:49:08 +07:00
|
|
|
|
2017-12-17 20:28:52 +07:00
|
|
|
i915_modparams.enable_hangcheck = saved_hangcheck;
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
|
2017-10-09 18:03:00 +07:00
|
|
|
|
|
|
|
return err;
|
2017-02-14 00:15:58 +07:00
|
|
|
}
|