2019-01-16 22:33:04 +07:00
|
|
|
/*
|
|
|
|
* SPDX-License-Identifier: MIT
|
|
|
|
*
|
|
|
|
* Copyright © 2008-2018 Intel Corporation
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/sched/mm.h>
|
2019-01-25 20:22:28 +07:00
|
|
|
#include <linux/stop_machine.h>
|
2019-01-16 22:33:04 +07:00
|
|
|
|
2019-08-06 18:39:33 +07:00
|
|
|
#include "display/intel_display_types.h"
|
2019-06-13 15:44:16 +07:00
|
|
|
#include "display/intel_overlay.h"
|
|
|
|
|
2019-05-28 16:29:49 +07:00
|
|
|
#include "gem/i915_gem_context.h"
|
|
|
|
|
2019-01-16 22:33:04 +07:00
|
|
|
#include "i915_drv.h"
|
|
|
|
#include "i915_gpu_error.h"
|
2019-04-29 19:29:27 +07:00
|
|
|
#include "i915_irq.h"
|
drm/i915: Invert the GEM wakeref hierarchy
In the current scheme, on submitting a request we take a single global
GEM wakeref, which trickles down to wake up all GT power domains. This
is undesirable as we would like to be able to localise our power
management to the available power domains and to remove the global GEM
operations from the heart of the driver. (The intent there is to push
global GEM decisions to the boundary as used by the GEM user interface.)
Now during request construction, each request is responsible via its
logical context to acquire a wakeref on each power domain it intends to
utilize. Currently, each request takes a wakeref on the engine(s) and
the engines themselves take a chipset wakeref. This gives us a
transition on each engine which we can extend if we want to insert more
powermangement control (such as soft rc6). The global GEM operations
that currently require a struct_mutex are reduced to listening to pm
events from the chipset GT wakeref. As we reduce the struct_mutex
requirement, these listeners should evaporate.
Perhaps the biggest immediate change is that this removes the
struct_mutex requirement around GT power management, allowing us greater
flexibility in request construction. Another important knock-on effect,
is that by tracking engine usage, we can insert a switch back to the
kernel context on that engine immediately, avoiding any extra delay or
inserting global synchronisation barriers. This makes tracking when an
engine and its associated contexts are idle much easier -- important for
when we forgo our assumed execution ordering and need idle barriers to
unpin used contexts. In the process, it means we remove a large chunk of
code whose only purpose was to switch back to the kernel context.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190424200717.1686-5-chris@chris-wilson.co.uk
2019-04-25 03:07:17 +07:00
|
|
|
#include "intel_engine_pm.h"
|
2019-06-21 14:07:44 +07:00
|
|
|
#include "intel_gt.h"
|
drm/i915: Invert the GEM wakeref hierarchy
In the current scheme, on submitting a request we take a single global
GEM wakeref, which trickles down to wake up all GT power domains. This
is undesirable as we would like to be able to localise our power
management to the available power domains and to remove the global GEM
operations from the heart of the driver. (The intent there is to push
global GEM decisions to the boundary as used by the GEM user interface.)
Now during request construction, each request is responsible via its
logical context to acquire a wakeref on each power domain it intends to
utilize. Currently, each request takes a wakeref on the engine(s) and
the engines themselves take a chipset wakeref. This gives us a
transition on each engine which we can extend if we want to insert more
powermangement control (such as soft rc6). The global GEM operations
that currently require a struct_mutex are reduced to listening to pm
events from the chipset GT wakeref. As we reduce the struct_mutex
requirement, these listeners should evaporate.
Perhaps the biggest immediate change is that this removes the
struct_mutex requirement around GT power management, allowing us greater
flexibility in request construction. Another important knock-on effect,
is that by tracking engine usage, we can insert a switch back to the
kernel context on that engine immediately, avoiding any extra delay or
inserting global synchronisation barriers. This makes tracking when an
engine and its associated contexts are idle much easier -- important for
when we forgo our assumed execution ordering and need idle barriers to
unpin used contexts. In the process, it means we remove a large chunk of
code whose only purpose was to switch back to the kernel context.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190424200717.1686-5-chris@chris-wilson.co.uk
2019-04-25 03:07:17 +07:00
|
|
|
#include "intel_gt_pm.h"
|
2019-04-25 00:48:39 +07:00
|
|
|
#include "intel_reset.h"
|
2019-01-16 22:33:04 +07:00
|
|
|
|
2019-07-13 17:00:11 +07:00
|
|
|
#include "uc/intel_guc.h"
|
2019-01-16 22:33:04 +07:00
|
|
|
|
2019-01-25 20:22:26 +07:00
|
|
|
#define RESET_MAX_RETRIES 3
|
|
|
|
|
2019-01-25 20:22:28 +07:00
|
|
|
/* XXX How to handle concurrent GGTT updates using tiling registers? */
|
|
|
|
#define RESET_UNDER_STOP_MACHINE 0
|
|
|
|
|
2019-04-06 03:24:19 +07:00
|
|
|
/* Set @set bits in @reg via a read-modify-write, forcewake already held. */
static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}
|
|
|
|
|
|
|
|
/* Clear @clr bits in @reg via a read-modify-write, forcewake already held. */
static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}
|
|
|
|
|
2019-01-16 22:33:04 +07:00
|
|
|
static void engine_skip_context(struct i915_request *rq)
|
|
|
|
{
|
|
|
|
struct intel_engine_cs *engine = rq->engine;
|
|
|
|
struct i915_gem_context *hung_ctx = rq->gem_context;
|
|
|
|
|
2019-06-14 23:46:06 +07:00
|
|
|
lockdep_assert_held(&engine->active.lock);
|
2019-01-16 22:33:04 +07:00
|
|
|
|
2019-02-14 01:27:37 +07:00
|
|
|
if (!i915_request_is_active(rq))
|
|
|
|
return;
|
2019-01-16 22:33:04 +07:00
|
|
|
|
2019-06-14 23:46:06 +07:00
|
|
|
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
|
2019-02-14 01:27:37 +07:00
|
|
|
if (rq->gem_context == hung_ctx)
|
|
|
|
i915_request_skip(rq, -EIO);
|
2019-01-16 22:33:04 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
|
|
|
|
const struct i915_gem_context *ctx)
|
|
|
|
{
|
|
|
|
unsigned int score;
|
|
|
|
unsigned long prev_hang;
|
|
|
|
|
|
|
|
if (i915_gem_context_is_banned(ctx))
|
|
|
|
score = I915_CLIENT_SCORE_CONTEXT_BAN;
|
|
|
|
else
|
|
|
|
score = 0;
|
|
|
|
|
|
|
|
prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
|
|
|
|
if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
|
|
|
|
score += I915_CLIENT_SCORE_HANG_FAST;
|
|
|
|
|
|
|
|
if (score) {
|
|
|
|
atomic_add(score, &file_priv->ban_score);
|
|
|
|
|
|
|
|
DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
|
|
|
|
ctx->name, score,
|
|
|
|
atomic_read(&file_priv->ban_score));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-25 20:22:28 +07:00
|
|
|
static bool context_mark_guilty(struct i915_gem_context *ctx)
|
2019-01-16 22:33:04 +07:00
|
|
|
{
|
drm/i915: Use time based guilty context banning
Currently, we accumulate each time a context hangs the GPU, offset
against the number of requests it submits, and if that score exceeds a
certain threshold, we ban that context from submitting any more requests
(cancelling any work in flight). In contrast, we use a simple timer on
the file, that if we see more than a 9 hangs faster than 60s apart in
total across all of its contexts, we will ban the client from creating
any more contexts. This leads to a confusing situation where the file
may be banned before the context, so lets use a simple timer scheme for
each.
If the context submits 3 hanging requests within a 120s period, declare
it forbidden to ever send more requests.
This has the advantage of not being easy to repair by simply sending
empty requests, but has the disadvantage that if the context is idle
then it is forgiven. However, if the context is idle, it is not
disrupting the system, but a hog can evade the request counting and
cause much more severe disruption to the system.
Updating ban_score from request retirement is dubious as the retirement
is purposely not in sync with request submission (i.e. we try and batch
retirement to reduce overhead and avoid latency on submission), which
leads to surprising situations where we can forgive a hang immediately
due to a backlog of requests from before the hang being retired
afterwards.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190219122215.8941-2-chris@chris-wilson.co.uk
2019-02-19 19:21:52 +07:00
|
|
|
unsigned long prev_hang;
|
|
|
|
bool banned;
|
|
|
|
int i;
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
atomic_inc(&ctx->guilty_count);
|
|
|
|
|
drm/i915: Use time based guilty context banning
Currently, we accumulate each time a context hangs the GPU, offset
against the number of requests it submits, and if that score exceeds a
certain threshold, we ban that context from submitting any more requests
(cancelling any work in flight). In contrast, we use a simple timer on
the file, that if we see more than a 9 hangs faster than 60s apart in
total across all of its contexts, we will ban the client from creating
any more contexts. This leads to a confusing situation where the file
may be banned before the context, so lets use a simple timer scheme for
each.
If the context submits 3 hanging requests within a 120s period, declare
it forbidden to ever send more requests.
This has the advantage of not being easy to repair by simply sending
empty requests, but has the disadvantage that if the context is idle
then it is forgiven. However, if the context is idle, it is not
disrupting the system, but a hog can evade the request counting and
cause much more severe disruption to the system.
Updating ban_score from request retirement is dubious as the retirement
is purposely not in sync with request submission (i.e. we try and batch
retirement to reduce overhead and avoid latency on submission), which
leads to surprising situations where we can forgive a hang immediately
due to a backlog of requests from before the hang being retired
afterwards.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190219122215.8941-2-chris@chris-wilson.co.uk
2019-02-19 19:21:52 +07:00
|
|
|
/* Cool contexts are too cool to be banned! (Used for reset testing.) */
|
|
|
|
if (!i915_gem_context_is_bannable(ctx))
|
2019-01-25 20:22:28 +07:00
|
|
|
return false;
|
2019-01-16 22:33:04 +07:00
|
|
|
|
drm/i915: Use time based guilty context banning
Currently, we accumulate each time a context hangs the GPU, offset
against the number of requests it submits, and if that score exceeds a
certain threshold, we ban that context from submitting any more requests
(cancelling any work in flight). In contrast, we use a simple timer on
the file, that if we see more than a 9 hangs faster than 60s apart in
total across all of its contexts, we will ban the client from creating
any more contexts. This leads to a confusing situation where the file
may be banned before the context, so lets use a simple timer scheme for
each.
If the context submits 3 hanging requests within a 120s period, declare
it forbidden to ever send more requests.
This has the advantage of not being easy to repair by simply sending
empty requests, but has the disadvantage that if the context is idle
then it is forgiven. However, if the context is idle, it is not
disrupting the system, but a hog can evade the request counting and
cause much more severe disruption to the system.
Updating ban_score from request retirement is dubious as the retirement
is purposely not in sync with request submission (i.e. we try and batch
retirement to reduce overhead and avoid latency on submission), which
leads to surprising situations where we can forgive a hang immediately
due to a backlog of requests from before the hang being retired
afterwards.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190219122215.8941-2-chris@chris-wilson.co.uk
2019-02-19 19:21:52 +07:00
|
|
|
/* Record the timestamp for the last N hangs */
|
|
|
|
prev_hang = ctx->hang_timestamp[0];
|
|
|
|
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
|
|
|
|
ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
|
|
|
|
ctx->hang_timestamp[i] = jiffies;
|
|
|
|
|
|
|
|
/* If we have hung N+1 times in rapid succession, we ban the context! */
|
|
|
|
banned = !i915_gem_context_is_recoverable(ctx);
|
|
|
|
if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
|
|
|
|
banned = true;
|
2019-01-16 22:33:04 +07:00
|
|
|
if (banned) {
|
drm/i915: Use time based guilty context banning
Currently, we accumulate each time a context hangs the GPU, offset
against the number of requests it submits, and if that score exceeds a
certain threshold, we ban that context from submitting any more requests
(cancelling any work in flight). In contrast, we use a simple timer on
the file, that if we see more than a 9 hangs faster than 60s apart in
total across all of its contexts, we will ban the client from creating
any more contexts. This leads to a confusing situation where the file
may be banned before the context, so lets use a simple timer scheme for
each.
If the context submits 3 hanging requests within a 120s period, declare
it forbidden to ever send more requests.
This has the advantage of not being easy to repair by simply sending
empty requests, but has the disadvantage that if the context is idle
then it is forgiven. However, if the context is idle, it is not
disrupting the system, but a hog can evade the request counting and
cause much more severe disruption to the system.
Updating ban_score from request retirement is dubious as the retirement
is purposely not in sync with request submission (i.e. we try and batch
retirement to reduce overhead and avoid latency on submission), which
leads to surprising situations where we can forgive a hang immediately
due to a backlog of requests from before the hang being retired
afterwards.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190219122215.8941-2-chris@chris-wilson.co.uk
2019-02-19 19:21:52 +07:00
|
|
|
DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
|
|
|
|
ctx->name, atomic_read(&ctx->guilty_count));
|
2019-01-16 22:33:04 +07:00
|
|
|
i915_gem_context_set_banned(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!IS_ERR_OR_NULL(ctx->file_priv))
|
|
|
|
client_mark_guilty(ctx->file_priv, ctx);
|
2019-01-25 20:22:28 +07:00
|
|
|
|
|
|
|
return banned;
|
2019-01-16 22:33:04 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void context_mark_innocent(struct i915_gem_context *ctx)
|
|
|
|
{
|
|
|
|
atomic_inc(&ctx->active_count);
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * Apply the verdict of a GPU reset to the hung request @rq.
 *
 * A guilty request is cancelled with -EIO (and the rest of its context's
 * queued requests skipped if the context earns a ban); an innocent request
 * is flagged -EAGAIN so it can be resubmitted, and its context credited.
 * Caller must hold the engine's active.lock; @rq must not yet be completed.
 */
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
		  rq->engine->name,
		  rq->fence.context,
		  rq->fence.seqno,
		  yesno(guilty));

	lockdep_assert_held(&rq->engine->active.lock);
	GEM_BUG_ON(i915_request_completed(rq));

	if (guilty) {
		i915_request_skip(rq, -EIO);
		if (context_mark_guilty(rq->gem_context))
			engine_skip_context(rq);
	} else {
		dma_fence_set_error(&rq->fence, -EAGAIN);
		context_mark_innocent(rq->gem_context);
	}
}
|
|
|
|
|
2019-01-16 22:33:04 +07:00
|
|
|
static bool i915_in_reset(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
u8 gdrst;
|
|
|
|
|
|
|
|
pci_read_config_byte(pdev, I915_GDRST, &gdrst);
|
|
|
|
return gdrst & GRDOM_RESET_STATUS;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
static int i915_do_reset(struct intel_gt *gt,
|
2019-04-01 23:26:39 +07:00
|
|
|
intel_engine_mask_t engine_mask,
|
2019-01-16 22:33:04 +07:00
|
|
|
unsigned int retry)
|
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct pci_dev *pdev = gt->i915->drm.pdev;
|
2019-01-16 22:33:04 +07:00
|
|
|
int err;
|
|
|
|
|
|
|
|
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
|
|
|
|
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
|
2019-01-25 20:22:26 +07:00
|
|
|
udelay(50);
|
|
|
|
err = wait_for_atomic(i915_in_reset(pdev), 50);
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
/* Clear the reset request. */
|
|
|
|
pci_write_config_byte(pdev, I915_GDRST, 0);
|
2019-01-25 20:22:26 +07:00
|
|
|
udelay(50);
|
2019-01-16 22:33:04 +07:00
|
|
|
if (!err)
|
2019-01-25 20:22:26 +07:00
|
|
|
err = wait_for_atomic(!i915_in_reset(pdev), 50);
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool g4x_reset_complete(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
u8 gdrst;
|
|
|
|
|
|
|
|
pci_read_config_byte(pdev, I915_GDRST, &gdrst);
|
|
|
|
return (gdrst & GRDOM_RESET_ENABLE) == 0;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
static int g33_do_reset(struct intel_gt *gt,
|
2019-04-01 23:26:39 +07:00
|
|
|
intel_engine_mask_t engine_mask,
|
2019-01-16 22:33:04 +07:00
|
|
|
unsigned int retry)
|
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct pci_dev *pdev = gt->i915->drm.pdev;
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
|
2019-01-25 20:22:26 +07:00
|
|
|
return wait_for_atomic(g4x_reset_complete(pdev), 50);
|
2019-01-16 22:33:04 +07:00
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
static int g4x_do_reset(struct intel_gt *gt,
|
2019-04-01 23:26:39 +07:00
|
|
|
intel_engine_mask_t engine_mask,
|
2019-01-16 22:33:04 +07:00
|
|
|
unsigned int retry)
|
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct pci_dev *pdev = gt->i915->drm.pdev;
|
|
|
|
struct intel_uncore *uncore = gt->uncore;
|
2019-01-16 22:33:04 +07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
|
2019-04-06 03:24:19 +07:00
|
|
|
rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
|
|
|
|
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
pci_write_config_byte(pdev, I915_GDRST,
|
|
|
|
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
|
2019-01-25 20:22:26 +07:00
|
|
|
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
|
2019-01-16 22:33:04 +07:00
|
|
|
if (ret) {
|
|
|
|
DRM_DEBUG_DRIVER("Wait for media reset failed\n");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
pci_write_config_byte(pdev, I915_GDRST,
|
|
|
|
GRDOM_RENDER | GRDOM_RESET_ENABLE);
|
2019-01-25 20:22:26 +07:00
|
|
|
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
|
2019-01-16 22:33:04 +07:00
|
|
|
if (ret) {
|
|
|
|
DRM_DEBUG_DRIVER("Wait for render reset failed\n");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
pci_write_config_byte(pdev, I915_GDRST, 0);
|
|
|
|
|
2019-04-06 03:24:19 +07:00
|
|
|
rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
|
|
|
|
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
static int ironlake_do_reset(struct intel_gt *gt,
|
2019-04-01 23:26:39 +07:00
|
|
|
intel_engine_mask_t engine_mask,
|
2019-01-16 22:33:04 +07:00
|
|
|
unsigned int retry)
|
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_uncore *uncore = gt->uncore;
|
2019-01-16 22:33:04 +07:00
|
|
|
int ret;
|
|
|
|
|
2019-03-26 04:49:38 +07:00
|
|
|
intel_uncore_write_fw(uncore, ILK_GDSR,
|
|
|
|
ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
|
|
|
|
ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
|
2019-01-25 20:22:26 +07:00
|
|
|
ILK_GRDOM_RESET_ENABLE, 0,
|
|
|
|
5000, 0,
|
|
|
|
NULL);
|
2019-01-16 22:33:04 +07:00
|
|
|
if (ret) {
|
|
|
|
DRM_DEBUG_DRIVER("Wait for render reset failed\n");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2019-03-26 04:49:38 +07:00
|
|
|
intel_uncore_write_fw(uncore, ILK_GDSR,
|
|
|
|
ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
|
|
|
|
ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
|
2019-01-25 20:22:26 +07:00
|
|
|
ILK_GRDOM_RESET_ENABLE, 0,
|
|
|
|
5000, 0,
|
|
|
|
NULL);
|
2019-01-16 22:33:04 +07:00
|
|
|
if (ret) {
|
|
|
|
DRM_DEBUG_DRIVER("Wait for media reset failed\n");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
2019-03-26 04:49:38 +07:00
|
|
|
intel_uncore_write_fw(uncore, ILK_GDSR, 0);
|
|
|
|
intel_uncore_posting_read_fw(uncore, ILK_GDSR);
|
2019-01-16 22:33:04 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
|
2019-07-13 02:29:53 +07:00
|
|
|
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
static int gen6_reset_engines(struct intel_gt *gt,
|
2019-04-01 23:26:39 +07:00
|
|
|
intel_engine_mask_t engine_mask,
|
2019-01-16 22:33:04 +07:00
|
|
|
unsigned int retry)
|
|
|
|
{
|
|
|
|
struct intel_engine_cs *engine;
|
2019-03-06 01:03:30 +07:00
|
|
|
const u32 hw_engine_mask[] = {
|
|
|
|
[RCS0] = GEN6_GRDOM_RENDER,
|
|
|
|
[BCS0] = GEN6_GRDOM_BLT,
|
|
|
|
[VCS0] = GEN6_GRDOM_MEDIA,
|
|
|
|
[VCS1] = GEN8_GRDOM_MEDIA2,
|
|
|
|
[VECS0] = GEN6_GRDOM_VECS,
|
2019-01-16 22:33:04 +07:00
|
|
|
};
|
|
|
|
u32 hw_mask;
|
|
|
|
|
|
|
|
if (engine_mask == ALL_ENGINES) {
|
|
|
|
hw_mask = GEN6_GRDOM_FULL;
|
|
|
|
} else {
|
2019-04-01 23:26:39 +07:00
|
|
|
intel_engine_mask_t tmp;
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
hw_mask = 0;
|
2019-07-13 02:29:53 +07:00
|
|
|
for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
|
2019-03-06 01:03:30 +07:00
|
|
|
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
|
2019-01-16 22:33:04 +07:00
|
|
|
hw_mask |= hw_engine_mask[engine->id];
|
2019-03-06 01:03:30 +07:00
|
|
|
}
|
2019-01-16 22:33:04 +07:00
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
return gen6_hw_domain_reset(gt, hw_mask);
|
2019-01-16 22:33:04 +07:00
|
|
|
}
|
|
|
|
|
2019-04-06 01:15:50 +07:00
|
|
|
static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
|
2019-01-16 22:33:04 +07:00
|
|
|
{
|
2019-04-06 01:15:50 +07:00
|
|
|
struct intel_uncore *uncore = engine->uncore;
|
|
|
|
u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
|
2019-01-16 22:33:04 +07:00
|
|
|
i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
|
|
|
|
u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
|
|
|
|
i915_reg_t sfc_usage;
|
|
|
|
u32 sfc_usage_bit;
|
|
|
|
u32 sfc_reset_bit;
|
|
|
|
|
|
|
|
switch (engine->class) {
|
|
|
|
case VIDEO_DECODE_CLASS:
|
|
|
|
if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
|
|
|
|
sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
|
|
|
|
|
|
|
|
sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
|
|
|
|
sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
|
|
|
|
|
|
|
|
sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
|
|
|
|
sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
|
|
|
|
sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIDEO_ENHANCEMENT_CLASS:
|
|
|
|
sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
|
|
|
|
sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
|
|
|
|
|
|
|
|
sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
|
|
|
|
sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
|
|
|
|
|
|
|
|
sfc_usage = GEN11_VECS_SFC_USAGE(engine);
|
|
|
|
sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
|
|
|
|
sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Tell the engine that a software reset is going to happen. The engine
|
|
|
|
* will then try to force lock the SFC (if currently locked, it will
|
|
|
|
* remain so until we tell the engine it is safe to unlock; if currently
|
|
|
|
* unlocked, it will ignore this and all new lock requests). If SFC
|
|
|
|
* ends up being locked to the engine we want to reset, we have to reset
|
|
|
|
* it as well (we will unlock it once the reset sequence is completed).
|
|
|
|
*/
|
2019-04-06 03:24:19 +07:00
|
|
|
rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
|
2019-01-16 22:33:04 +07:00
|
|
|
|
2019-03-26 04:49:38 +07:00
|
|
|
if (__intel_wait_for_register_fw(uncore,
|
2019-01-16 22:33:04 +07:00
|
|
|
sfc_forced_lock_ack,
|
|
|
|
sfc_forced_lock_ack_bit,
|
|
|
|
sfc_forced_lock_ack_bit,
|
|
|
|
1000, 0, NULL)) {
|
|
|
|
DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-03-26 04:49:38 +07:00
|
|
|
if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
|
2019-01-16 22:33:04 +07:00
|
|
|
return sfc_reset_bit;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-04-06 01:15:50 +07:00
|
|
|
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
|
2019-01-16 22:33:04 +07:00
|
|
|
{
|
2019-04-06 01:15:50 +07:00
|
|
|
struct intel_uncore *uncore = engine->uncore;
|
|
|
|
u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
|
2019-01-16 22:33:04 +07:00
|
|
|
i915_reg_t sfc_forced_lock;
|
|
|
|
u32 sfc_forced_lock_bit;
|
|
|
|
|
|
|
|
switch (engine->class) {
|
|
|
|
case VIDEO_DECODE_CLASS:
|
|
|
|
if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
|
|
|
|
sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIDEO_ENHANCEMENT_CLASS:
|
|
|
|
sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
|
|
|
|
sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-04-06 03:24:19 +07:00
|
|
|
rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
|
2019-01-16 22:33:04 +07:00
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
static int gen11_reset_engines(struct intel_gt *gt,
|
2019-04-01 23:26:39 +07:00
|
|
|
intel_engine_mask_t engine_mask,
|
2019-01-16 22:33:04 +07:00
|
|
|
unsigned int retry)
|
|
|
|
{
|
2019-03-06 01:03:30 +07:00
|
|
|
const u32 hw_engine_mask[] = {
|
|
|
|
[RCS0] = GEN11_GRDOM_RENDER,
|
|
|
|
[BCS0] = GEN11_GRDOM_BLT,
|
|
|
|
[VCS0] = GEN11_GRDOM_MEDIA,
|
|
|
|
[VCS1] = GEN11_GRDOM_MEDIA2,
|
|
|
|
[VCS2] = GEN11_GRDOM_MEDIA3,
|
|
|
|
[VCS3] = GEN11_GRDOM_MEDIA4,
|
|
|
|
[VECS0] = GEN11_GRDOM_VECS,
|
|
|
|
[VECS1] = GEN11_GRDOM_VECS2,
|
2019-01-16 22:33:04 +07:00
|
|
|
};
|
|
|
|
struct intel_engine_cs *engine;
|
2019-04-01 23:26:39 +07:00
|
|
|
intel_engine_mask_t tmp;
|
2019-01-16 22:33:04 +07:00
|
|
|
u32 hw_mask;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (engine_mask == ALL_ENGINES) {
|
|
|
|
hw_mask = GEN11_GRDOM_FULL;
|
|
|
|
} else {
|
|
|
|
hw_mask = 0;
|
2019-07-13 02:29:53 +07:00
|
|
|
for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
|
2019-03-06 01:03:30 +07:00
|
|
|
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
|
2019-01-16 22:33:04 +07:00
|
|
|
hw_mask |= hw_engine_mask[engine->id];
|
2019-04-06 01:15:50 +07:00
|
|
|
hw_mask |= gen11_lock_sfc(engine);
|
2019-01-16 22:33:04 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
ret = gen6_hw_domain_reset(gt, hw_mask);
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
if (engine_mask != ALL_ENGINES)
|
2019-07-13 02:29:53 +07:00
|
|
|
for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
|
2019-04-06 01:15:50 +07:00
|
|
|
gen11_unlock_sfc(engine);
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
|
|
|
|
{
|
2019-04-06 01:15:50 +07:00
|
|
|
struct intel_uncore *uncore = engine->uncore;
|
2019-04-12 23:53:35 +07:00
|
|
|
const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
|
|
|
|
u32 request, mask, ack;
|
2019-01-16 22:33:04 +07:00
|
|
|
int ret;
|
|
|
|
|
2019-04-12 23:53:35 +07:00
|
|
|
ack = intel_uncore_read_fw(uncore, reg);
|
2019-04-12 23:53:53 +07:00
|
|
|
if (ack & RESET_CTL_CAT_ERROR) {
|
|
|
|
/*
|
|
|
|
* For catastrophic errors, ready-for-reset sequence
|
|
|
|
* needs to be bypassed: HAS#396813
|
|
|
|
*/
|
|
|
|
request = RESET_CTL_CAT_ERROR;
|
|
|
|
mask = RESET_CTL_CAT_ERROR;
|
|
|
|
|
|
|
|
/* Catastrophic errors need to be cleared by HW */
|
|
|
|
ack = 0;
|
|
|
|
} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
|
2019-04-12 23:53:35 +07:00
|
|
|
request = RESET_CTL_REQUEST_RESET;
|
|
|
|
mask = RESET_CTL_READY_TO_RESET;
|
|
|
|
ack = RESET_CTL_READY_TO_RESET;
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
2019-01-16 22:33:04 +07:00
|
|
|
|
2019-04-12 23:53:35 +07:00
|
|
|
intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
|
|
|
|
ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
|
|
|
|
700, 0, NULL);
|
2019-01-16 22:33:04 +07:00
|
|
|
if (ret)
|
2019-04-12 23:53:35 +07:00
|
|
|
DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
|
|
|
|
engine->name, request,
|
|
|
|
intel_uncore_read_fw(uncore, reg));
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
|
|
|
|
{
|
2019-04-06 03:24:19 +07:00
|
|
|
intel_uncore_write_fw(engine->uncore,
|
|
|
|
RING_RESET_CTL(engine->mmio_base),
|
|
|
|
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
|
2019-01-16 22:33:04 +07:00
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * gen8+ engine reset: handshake with each engine (request ready-for-reset),
 * then perform the actual hardware reset via the gen-appropriate helper,
 * and finally withdraw the reset requests.
 *
 * @retry: attempt number; on retries (>= 1) a failed prepare handshake is
 *         ignored and the reset proceeds anyway (see comment in loop body).
 *
 * Returns 0 on success or a negative error code.
 */
static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We rather take context corruption instead of
		 * failed reset with a wedged driver/gpu. And
		 * active bb execution case should be covered by
		 * stop_engines() we have before the reset.
		 */
	}

	/* Dispatch to the generation-specific hardware reset routine. */
	if (INTEL_GEN(gt->i915) >= 11)
		ret = gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	/* Always cancel the reset request, whether or not the reset ran. */
	for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
typedef int (*reset_func)(struct intel_gt *,
|
2019-04-01 23:26:39 +07:00
|
|
|
intel_engine_mask_t engine_mask,
|
2019-01-16 22:33:04 +07:00
|
|
|
unsigned int retry);
|
|
|
|
|
|
|
|
static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
|
|
|
|
{
|
|
|
|
if (INTEL_GEN(i915) >= 8)
|
|
|
|
return gen8_reset_engines;
|
|
|
|
else if (INTEL_GEN(i915) >= 6)
|
|
|
|
return gen6_reset_engines;
|
|
|
|
else if (INTEL_GEN(i915) >= 5)
|
|
|
|
return ironlake_do_reset;
|
|
|
|
else if (IS_G4X(i915))
|
|
|
|
return g4x_do_reset;
|
|
|
|
else if (IS_G33(i915) || IS_PINEVIEW(i915))
|
|
|
|
return g33_do_reset;
|
|
|
|
else if (INTEL_GEN(i915) >= 3)
|
|
|
|
return i915_do_reset;
|
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * Perform a GPU reset of the engines in @engine_mask, retrying on timeout
 * (up to RESET_MAX_RETRIES for a full-chip ALL_ENGINES reset, once for a
 * per-engine reset).
 *
 * Returns 0 on success, -ENODEV if this device has no reset support, or a
 * negative error code from the generation-specific reset routine.
 */
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt->i915);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GEM_TRACE("engine_mask=%x\n", engine_mask);
		/* Reset runs with preemption off to keep the sequence atomic. */
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}
|
|
|
|
|
|
|
|
bool intel_has_gpu_reset(struct drm_i915_private *i915)
|
|
|
|
{
|
2019-02-08 22:37:04 +07:00
|
|
|
if (!i915_modparams.reset)
|
|
|
|
return NULL;
|
|
|
|
|
2019-01-16 22:33:04 +07:00
|
|
|
return intel_get_gpu_reset(i915);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool intel_has_reset_engine(struct drm_i915_private *i915)
|
|
|
|
{
|
|
|
|
return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
int intel_reset_guc(struct intel_gt *gt)
|
2019-01-16 22:33:04 +07:00
|
|
|
{
|
|
|
|
u32 guc_domain =
|
2019-07-13 02:29:53 +07:00
|
|
|
INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
|
2019-01-16 22:33:04 +07:00
|
|
|
int ret;
|
|
|
|
|
2019-07-25 07:18:06 +07:00
|
|
|
GEM_BUG_ON(!HAS_GT_UC(gt->i915));
|
2019-01-16 22:33:04 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
|
|
|
|
ret = gen6_hw_domain_reset(gt, guc_domain);
|
|
|
|
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
|
2019-01-16 22:33:04 +07:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ensure irq handler finishes, and not run again.
|
|
|
|
* Also return the active request so that we only search for it once.
|
|
|
|
*/
|
2019-01-25 20:22:28 +07:00
|
|
|
/* Quiesce one engine before reset: hold forcewake and run its prepare hook. */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	/* Released by the matching reset_finish_engine(). */
	engine->reset.prepare(engine);
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * Invalidate all userspace GGTT mmaps that rely on fence registers, so
 * that any access after the reset faults back into the driver (the fence
 * state is clobbered by a reset and must be revalidated on next fault).
 */
static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		/* Lockless peek; NOTE(review): racy by design around reset? confirm */
		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		/* Only bother with vma currently mapped by userspace. */
		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
		node = &vma->obj->base.vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
		/* Zap the CPU PTEs for this (possibly partial) mapping. */
		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * Prepare every engine (and the uC) for reset.
 *
 * Returns the mask of engines that were awake at this point; the caller
 * passes it to reset_finish() so the matching pm references are dropped.
 */
static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	for_each_engine(engine, gt->i915, id) {
		/* Pin awake engines so they cannot power down mid-reset. */
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	intel_uc_reset_prepare(&gt->uc);

	return awake;
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/* Revoke userspace access that a reset would invalidate (GGTT mmaps). */
static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * Post-hardware-reset recovery: bring the GGTT back, replay/cleanup each
 * engine's state (marking guilty requests on engines in @stalled_mask),
 * and restore fence registers clobbered by the reset.
 *
 * Returns 0 on success or the error from re-enabling the GGTT.
 */
static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	for_each_engine(engine, gt->i915, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);

	i915_gem_restore_fences(gt->i915);

	/* err is 0 here; kept as-is to preserve original control flow. */
	return err;
}
|
|
|
|
|
2019-01-25 20:22:28 +07:00
|
|
|
/*
 * Undo reset_prepare_engine(): run the engine's finish hook, release the
 * forcewake taken during prepare, and kick any waiters on completed fences.
 */
static void reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_signal_breadcrumbs(engine);
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * Undo reset_prepare() for all engines, dropping the pm reference taken
 * for each engine recorded in @awake.
 */
static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->i915, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}
}
|
|
|
|
|
|
|
|
/*
 * Wedged-mode submit hook: instead of executing the request, immediately
 * fail it with -EIO and mark it complete, so clients waiting on the fence
 * are unblocked while the GPU stays stopped.
 */
static void nop_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
		  engine->name, request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	/* Submit + complete atomically w.r.t. the engine's active list. */
	spin_lock_irqsave(&engine->active.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->active.lock, flags);

	/* Signal outside the lock so breadcrumb processing can run. */
	intel_engine_queue_breadcrumbs(engine);
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * Declare the GPU terminally wedged: stop the engines, redirect all request
 * submission to nop_submit_request() (which fails requests with -EIO), and
 * cancel everything in flight. Idempotent: returns early if already wedged.
 * Caller must hold gt->reset.mutex (see intel_gt_set_wedged()).
 */
static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	/* Debug builds: dump engine state if we wedge while supposedly busy. */
	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, gt->i915, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

	GEM_TRACE("start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt->i915, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	for_each_engine(engine, gt->i915, id)
		engine->cancel_requests(engine);

	reset_finish(gt, awake);

	GEM_TRACE("end\n");
}
|
2019-01-16 22:33:04 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * Public entry point for wedging the GPU: serialises against other reset
 * paths via gt->reset.mutex and holds a runtime-pm wakeref while the
 * hardware is being stopped.
 */
void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	mutex_lock(&gt->reset.mutex);
	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		__intel_gt_set_wedged(gt);
	mutex_unlock(&gt->reset.mutex);
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * Attempt to recover from a wedged state: wait for every pending request
 * to be flushed/errored out, sanitize the GT, and restore the real
 * submission backends. Caller must hold gt->reset.mutex.
 *
 * Returns true if the device is usable again (or was never wedged),
 * false if recovery is impossible (device never fully initialised).
 */
static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	unsigned long flags;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	if (!gt->scratch) /* Never full initialised, recovery impossible */
		return false;

	GEM_TRACE("start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock_irqsave(&timelines->lock, flags);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		/* Drop the lock: the wait below may sleep. */
		spin_unlock_irqrestore(&timelines->lock, flags);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);

		/* Restart iteration after dropping lock */
		spin_lock_irqsave(&timelines->lock, flags);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock_irqrestore(&timelines->lock, flags);

	intel_gt_sanitize(gt, false);

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GEM_TRACE("end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
bool intel_gt_unset_wedged(struct intel_gt *gt)
|
2019-02-08 22:37:07 +07:00
|
|
|
{
|
|
|
|
bool result;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
mutex_lock(>->reset.mutex);
|
|
|
|
result = __intel_gt_unset_wedged(gt);
|
|
|
|
mutex_unlock(>->reset.mutex);
|
2019-02-08 22:37:07 +07:00
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
/*
 * Full-chip reset with retries: revoke user mmaps, hammer the hardware
 * reset up to RESET_MAX_RETRIES times with increasing back-off, then run
 * post-reset recovery. Returns 0 on success or a negative error code.
 */
static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	gt_revoke(gt);

	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		/* Back off 10ms, 20ms, ... before each retry. */
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
static int resume(struct intel_gt *gt)
|
2019-06-26 22:45:49 +07:00
|
|
|
{
|
|
|
|
struct intel_engine_cs *engine;
|
|
|
|
enum intel_engine_id id;
|
|
|
|
int ret;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
for_each_engine(engine, gt->i915, id) {
|
2019-06-26 22:45:49 +07:00
|
|
|
ret = engine->resume(engine);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GEM_TRACE("flags=%lx\n", gt->reset.flags);

	might_sleep();
	/* Caller must already hold the BACKOFF bit; we only take the mutex. */
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		dev_notice(gt->i915->drm.dev,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	/* Quiesce the engines; 'awake' is passed back to reset_finish() below. */
	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt->i915)) {
		if (i915_modparams.reset)
			dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = intel_gt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
			  ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

	intel_gt_queue_hangcheck(gt);

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}
|
2019-07-13 02:29:53 +07:00
|
|
|
static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
|
2019-01-16 22:33:04 +07:00
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
return __intel_gt_reset(engine->gt, engine->mask);
|
2019-01-16 22:33:04 +07:00
|
|
|
}
|
|
|
|
|
/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no dev_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identifies the request that caused the hang and it is dropped
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
	/* Caller must hold this engine's RESET_ENGINE bit for exclusion. */
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	/* If the engine is already parked, there is nothing to reset. */
	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	/* With a GuC submission client active, the reset goes via the GuC. */
	if (!engine->gt->uc.guc.execbuf_client)
		ret = intel_gt_reset_engine(engine);
	else
		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
	if (ret) {
		/* If we fail here, we expect to fallback to a global reset */
		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
				 engine->gt->uc.guc.execbuf_client ? "GuC " : "",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put(engine);
	return ret;
}
|
/*
 * Perform a full-device reset on behalf of intel_gt_handle_error(),
 * notifying userspace via uevents before and after, and guarding the
 * whole sequence with a wedge-on-timeout watchdog so a stuck reset
 * cannot hang the system indefinitely.
 */
static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_prepare_reset(gt->i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&gt->reset.backoff_srcu);

		intel_gt_reset(gt, engine_mask, reason);

		intel_finish_reset(gt->i915);
	}

	/* Only report completion if the reset did not leave us wedged. */
	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
|
/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);

	/* Ignore requests to reset engines this device does not have. */
	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt->i915, engine_mask, msg);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) {
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			/* Skip engines already being reset by someone else. */
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			/* On success, drop the engine from the pending mask. */
			if (intel_engine_reset(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, gt->i915, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&gt->reset.flags))
			wait_on_bit(&gt->reset.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	intel_gt_reset_global(gt, engine_mask, msg);

	/* Release the per-engine and global reset locks, then wake waiters. */
	for_each_engine(engine, gt->i915, tmp)
		clear_bit_unlock(I915_RESET_ENGINE + engine->id,
				 &gt->reset.flags);
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}
|
/*
 * Acquire a read-side reference on the reset backoff SRCU, waiting for
 * any reset in progress (I915_RESET_BACKOFF) to finish first.  The RCU
 * read lock bridges the gap between observing BACKOFF clear and taking
 * the SRCU lock, so the flag test and lock acquisition stay paired.
 *
 * Returns the SRCU tag (>= 0) to pass to intel_gt_reset_unlock(), or
 * -EINTR if interrupted while waiting for a reset to complete.
 */
int intel_gt_reset_trylock(struct intel_gt *gt)
{
	int srcu;

	might_lock(&gt->reset.backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return srcu;
}
|
/* Drop the SRCU reference taken by intel_gt_reset_trylock(); @tag is its return value. */
void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}
|
/*
 * Report whether the GT is irrecoverably wedged.
 *
 * Returns 0 if not wedged (or if a concurrent reset recovers it),
 * -EIO if terminally wedged, -EAGAIN if we cannot wait because the
 * caller may hold a lock the reset path needs, or -EINTR if the wait
 * for an in-flight reset was interrupted.
 */
int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	/* Reset still in progress? Maybe we will recover? */
	if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
		return -EIO;

	/* XXX intel_reset_finish() still takes struct_mutex!!! */
	if (mutex_is_locked(&gt->i915->drm.struct_mutex))
		return -EAGAIN;

	/* Wait for the in-progress reset to finish, then re-check. */
	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}
|
|
|
|
void intel_gt_init_reset(struct intel_gt *gt)
|
|
|
|
{
|
|
|
|
init_waitqueue_head(>->reset.queue);
|
|
|
|
mutex_init(>->reset.mutex);
|
|
|
|
init_srcu_struct(>->reset.backoff_srcu);
|
|
|
|
}
|
|
|
|
|
/* Tear down the reset machinery; pairs with intel_gt_init_reset(). */
void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}
|
2019-07-13 02:29:53 +07:00
|
|
|
static void intel_wedge_me(struct work_struct *work)
|
2019-01-16 22:33:04 +07:00
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
|
2019-01-16 22:33:04 +07:00
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
dev_err(w->gt->i915->drm.dev,
|
2019-01-16 22:33:04 +07:00
|
|
|
"%s timed out, cancelling all in-flight rendering.\n",
|
|
|
|
w->name);
|
2019-07-13 02:29:53 +07:00
|
|
|
intel_gt_set_wedged(w->gt);
|
2019-01-16 22:33:04 +07:00
|
|
|
}
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
void __intel_init_wedge(struct intel_wedge_me *w,
|
|
|
|
struct intel_gt *gt,
|
|
|
|
long timeout,
|
|
|
|
const char *name)
|
2019-01-16 22:33:04 +07:00
|
|
|
{
|
2019-07-13 02:29:53 +07:00
|
|
|
w->gt = gt;
|
2019-01-16 22:33:04 +07:00
|
|
|
w->name = name;
|
|
|
|
|
2019-07-13 02:29:53 +07:00
|
|
|
INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
|
2019-01-16 22:33:04 +07:00
|
|
|
schedule_delayed_work(&w->work, timeout);
|
|
|
|
}
|
|
|
|
|
/* Disarm the wedge watchdog; must run before the on-stack @w goes out of scope. */
void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}
|
2019-05-23 02:31:55 +07:00
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
|
|
|
#include "selftest_reset.c"
|
|
|
|
#endif
|