mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-21 17:58:24 +07:00
3f04bdce72
Getting wedged device on driver init is pretty much unrecoverable.
Since we're running various scenarios that may potentially hit this in
CI (module reload / selftests / hotunplug), and if it happens, it means
that we can't trust any subsequent CI results, we should just apply the
taint to let the CI know that it should reboot (CI checks taint between
test runs).

v2: Comment that WEDGED_ON_INIT is non-recoverable, distinguish
    WEDGED_ON_INIT from WEDGED_ON_FINI (Chris)
v3: Appease checkpatch, fixup search-replace logic expression
    mindbomb in assert (Chris)

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Petri Latvala <petri.latvala@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200706144107.204821-1-michal@hardline.pl
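The idea in the commit message, as a minimal hedged sketch (the example_* names,
flag bit, and parameter below are illustrative assumptions, not the exact i915
code in this commit): when the device wedges during driver init, record that the
state is non-recoverable and taint the kernel, so CI, which checks the taint mask
between test runs, knows it must reboot before trusting further results.

/*
 * Illustrative sketch only -- the example_* names are assumptions, not the
 * actual i915 implementation referenced by this commit.
 */
#include <linux/bitops.h>	/* set_bit() */
#include <linux/kernel.h>	/* add_taint(), TAINT_WARN, LOCKDEP_STILL_OK */

#define EXAMPLE_WEDGED_ON_INIT	0	/* hypothetical reset-flags bit */

static void example_set_wedged_on_init(unsigned long *reset_flags)
{
        /* Wedging during init is not recoverable: remember that ... */
        set_bit(EXAMPLE_WEDGED_ON_INIT, reset_flags);

        /*
         * ... and taint the kernel so CI (which checks the taint mask
         * between test runs) knows it should reboot before running
         * anything else on this machine.
         */
        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}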
645 lines
15 KiB
C
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "debugfs_gt.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
#include "intel_pm.h"
#include "shmem_utils.h"

void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
        gt->i915 = i915;
        gt->uncore = &i915->uncore;

        spin_lock_init(&gt->irq_lock);

        INIT_LIST_HEAD(&gt->closed_vma);
        spin_lock_init(&gt->closed_lock);

        intel_gt_init_buffer_pool(gt);
        intel_gt_init_reset(gt);
        intel_gt_init_requests(gt);
        intel_gt_init_timelines(gt);
        intel_gt_pm_init_early(gt);

        intel_rps_init_early(&gt->rps);
        intel_uc_init_early(&gt->uc);
}

void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
        gt->ggtt = ggtt;
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
        struct intel_uncore *uncore = gt->uncore;

        intel_uncore_write(uncore, RING_CTL(base), 0);
        intel_uncore_write(uncore, RING_HEAD(base), 0);
        intel_uncore_write(uncore, RING_TAIL(base), 0);
        intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        if (IS_I830(i915)) {
                init_unused_ring(gt, PRB1_BASE);
                init_unused_ring(gt, SRB0_BASE);
                init_unused_ring(gt, SRB1_BASE);
                init_unused_ring(gt, SRB2_BASE);
                init_unused_ring(gt, SRB3_BASE);
        } else if (IS_GEN(i915, 2)) {
                init_unused_ring(gt, SRB0_BASE);
                init_unused_ring(gt, SRB1_BASE);
        } else if (IS_GEN(i915, 3)) {
                init_unused_ring(gt, PRB1_BASE);
                init_unused_ring(gt, PRB2_BASE);
        }
}

int intel_gt_init_hw(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
        int ret;

        gt->last_init_time = ktime_get();

        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

        if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
                intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

        if (IS_HASWELL(i915))
                intel_uncore_write(uncore,
                                   MI_PREDICATE_RESULT_2,
                                   IS_HSW_GT3(i915) ?
                                   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

        /* Apply the GT workarounds... */
        intel_gt_apply_workarounds(gt);
        /* ...and determine whether they are sticking. */
        intel_gt_verify_workarounds(gt, "init");

        intel_gt_init_swizzling(gt);

        /*
         * At least 830 can leave some of the unused rings
         * "active" (ie. head != tail) after resume which
         * will prevent c3 entry. Make sure all unused rings
         * are totally idle.
         */
        init_unused_rings(gt);

        ret = i915_ppgtt_init_hw(gt);
        if (ret) {
                DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
                goto out;
        }

        /* We can't enable contexts until all firmware is loaded */
        ret = intel_uc_init_hw(&gt->uc);
        if (ret) {
                i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
                goto out;
        }

        intel_mocs_init(gt);

out:
        intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
        return ret;
}

static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
        intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
        intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
        intel_uncore_rmw(uncore, reg, 0, 0);
}

static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
        GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
        GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

void
intel_gt_clear_error_registers(struct intel_gt *gt,
                               intel_engine_mask_t engine_mask)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
        u32 eir;

        if (!IS_GEN(i915, 2))
                clear_register(uncore, PGTBL_ER);

        if (INTEL_GEN(i915) < 4)
                clear_register(uncore, IPEIR(RENDER_RING_BASE));
        else
                clear_register(uncore, IPEIR_I965);

        clear_register(uncore, EIR);
        eir = intel_uncore_read(uncore, EIR);
        if (eir) {
                /*
                 * some errors might have become stuck,
                 * mask them.
                 */
                DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
                rmw_set(uncore, EMR, eir);
                intel_uncore_write(uncore, GEN2_IIR,
                                   I915_MASTER_ERROR_INTERRUPT);
        }

        if (INTEL_GEN(i915) >= 12) {
                rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
                intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
        } else if (INTEL_GEN(i915) >= 8) {
                rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
                intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
        } else if (INTEL_GEN(i915) >= 6) {
                struct intel_engine_cs *engine;
                enum intel_engine_id id;

                for_each_engine_masked(engine, gt, engine_mask, id)
                        gen8_clear_engine_error_register(engine);
        }
}

static void gen6_check_faults(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        u32 fault;

        for_each_engine(engine, gt, id) {
                fault = GEN6_RING_FAULT_REG_READ(engine);
                if (fault & RING_FAULT_VALID) {
                        drm_dbg(&engine->i915->drm, "Unexpected fault\n"
                                "\tAddr: 0x%08lx\n"
                                "\tAddress space: %s\n"
                                "\tSource ID: %d\n"
                                "\tType: %d\n",
                                fault & PAGE_MASK,
                                fault & RING_FAULT_GTTSEL_MASK ?
                                "GGTT" : "PPGTT",
                                RING_FAULT_SRCID(fault),
                                RING_FAULT_FAULT_TYPE(fault));
                }
        }
}

static void gen8_check_faults(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;
        i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
        u32 fault;

        if (INTEL_GEN(gt->i915) >= 12) {
                fault_reg = GEN12_RING_FAULT_REG;
                fault_data0_reg = GEN12_FAULT_TLB_DATA0;
                fault_data1_reg = GEN12_FAULT_TLB_DATA1;
        } else {
                fault_reg = GEN8_RING_FAULT_REG;
                fault_data0_reg = GEN8_FAULT_TLB_DATA0;
                fault_data1_reg = GEN8_FAULT_TLB_DATA1;
        }

        fault = intel_uncore_read(uncore, fault_reg);
        if (fault & RING_FAULT_VALID) {
                u32 fault_data0, fault_data1;
                u64 fault_addr;

                fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
                fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

                fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
                             ((u64)fault_data0 << 12);

                drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
                        "\tAddr: 0x%08x_%08x\n"
                        "\tAddress space: %s\n"
                        "\tEngine ID: %d\n"
                        "\tSource ID: %d\n"
                        "\tType: %d\n",
                        upper_32_bits(fault_addr), lower_32_bits(fault_addr),
                        fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
                        GEN8_RING_FAULT_ENGINE_ID(fault),
                        RING_FAULT_SRCID(fault),
                        RING_FAULT_FAULT_TYPE(fault));
        }
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        /* From GEN8 onwards we only have one 'All Engine Fault Register' */
        if (INTEL_GEN(i915) >= 8)
                gen8_check_faults(gt);
        else if (INTEL_GEN(i915) >= 6)
                gen6_check_faults(gt);
        else
                return;

        intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;
        intel_wakeref_t wakeref;

        /*
         * No actual flushing is required for the GTT write domain for reads
         * from the GTT domain. Writes to it "immediately" go to main memory
         * as far as we know, so there's no chipset flush. It also doesn't
         * land in the GPU render cache.
         *
         * However, we do have to enforce the order so that all writes through
         * the GTT land before any writes to the device, such as updates to
         * the GATT itself.
         *
         * We also have to wait a bit for the writes to land from the GTT.
         * An uncached read (i.e. mmio) seems to be ideal for the round-trip
         * timing. This issue has only been observed when switching quickly
         * between GTT writes and CPU reads from inside the kernel on recent hw,
         * and it appears to only affect discrete GTT blocks (i.e. on LLC
         * system agents we cannot reproduce this behaviour, until Cannonlake
         * that was!).
         */

        wmb();

        if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
                return;

        intel_gt_chipset_flush(gt);

        with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
                unsigned long flags;

                spin_lock_irqsave(&uncore->lock, flags);
                intel_uncore_posting_read_fw(uncore,
                                             RING_HEAD(RENDER_RING_BASE));
                spin_unlock_irqrestore(&uncore->lock, flags);
        }
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
        wmb();
        if (INTEL_GEN(gt->i915) < 6)
                intel_gtt_chipset_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
        intel_rps_driver_register(&gt->rps);

        debugfs_gt_register(gt);
}

static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
        struct drm_i915_private *i915 = gt->i915;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int ret;

        obj = i915_gem_object_create_stolen(i915, size);
        if (IS_ERR(obj))
                obj = i915_gem_object_create_internal(i915, size);
        if (IS_ERR(obj)) {
                DRM_ERROR("Failed to allocate scratch page\n");
                return PTR_ERR(obj);
        }

        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unref;
        }

        ret = i915_ggtt_pin(vma, 0, PIN_HIGH);
        if (ret)
                goto err_unref;

        gt->scratch = i915_vma_make_unshrinkable(vma);

        return 0;

err_unref:
        i915_gem_object_put(obj);
        return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
        i915_vma_unpin_and_release(&gt->scratch, 0);
}

static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
        if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
                return &i915_ppgtt_create(gt)->vm;
        else
                return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
        struct i915_request *requests[I915_NUM_ENGINES] = {};
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /*
         * As we reset the gpu during very early sanitisation, the current
         * register state on the GPU should reflect its default values.
         * We load a context onto the hw (with restore-inhibit), then switch
         * over to a second context to save that default register state. We
         * can then prime every new context with that state so they all start
         * from the same default HW values.
         */

        for_each_engine(engine, gt, id) {
                struct intel_renderstate so;
                struct intel_context *ce;
                struct i915_request *rq;

                /* We must be able to switch to something! */
                GEM_BUG_ON(!engine->kernel_context);

                err = intel_renderstate_init(&so, engine);
                if (err)
                        goto out;

                ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
                        goto out;
                }

                rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        intel_context_put(ce);
                        goto out;
                }

                err = intel_engine_emit_ctx_wa(rq);
                if (err)
                        goto err_rq;

                err = intel_renderstate_emit(&so, rq);
                if (err)
                        goto err_rq;

err_rq:
                requests[id] = i915_request_get(rq);
                i915_request_add(rq);
                intel_renderstate_fini(&so);
                if (err)
                        goto out;
        }

        /* Flush the default context image to memory, and enable powersaving. */
        if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
                err = -EIO;
                goto out;
        }

        for (id = 0; id < ARRAY_SIZE(requests); id++) {
                struct i915_request *rq;
                struct file *state;

                rq = requests[id];
                if (!rq)
                        continue;

                if (rq->fence.error) {
                        err = -EIO;
                        goto out;
                }

                GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
                if (!rq->context->state)
                        continue;

                /* Keep a copy of the state's backing pages; free the obj */
                state = shmem_create_from_object(rq->context->state->obj);
                if (IS_ERR(state)) {
                        err = PTR_ERR(state);
                        goto out;
                }
                rq->engine->default_state = state;
        }

out:
        /*
         * If we have to abandon now, we expect the engines to be idle
         * and ready to be torn-down. The quickest way we can accomplish
         * this is by declaring ourselves wedged.
         */
        if (err)
                intel_gt_set_wedged(gt);

        for (id = 0; id < ARRAY_SIZE(requests); id++) {
                struct intel_context *ce;
                struct i915_request *rq;

                rq = requests[id];
                if (!rq)
                        continue;

                ce = rq->context;
                i915_request_put(rq);
                intel_context_put(ce);
        }
        return err;
}

static int __engines_verify_workarounds(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return 0;

        for_each_engine(engine, gt, id) {
                if (intel_engine_verify_workarounds(engine, "load"))
                        err = -EIO;
        }

        /* Flush and restore the kernel context for safety */
        if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
                err = -EIO;

        return err;
}

static void __intel_gt_disable(struct intel_gt *gt)
{
        intel_gt_set_wedged_on_fini(gt);

        intel_gt_suspend_prepare(gt);
        intel_gt_suspend_late(gt);

        GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

int intel_gt_init(struct intel_gt *gt)
{
        int err;

        err = i915_inject_probe_error(gt->i915, -ENODEV);
        if (err)
                return err;

        /*
         * This is just a security blanket to placate dragons.
         * On some systems, we very sporadically observe that the first TLBs
         * used by the CS may be stale, despite us poking the TLB reset. If
         * we hold the forcewake during initialisation these problems
         * just magically go away.
         */
        intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

        intel_gt_init_clock_frequency(gt);

        err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
        if (err)
                goto out_fw;

        intel_gt_pm_init(gt);

        gt->vm = kernel_vm(gt);
        if (!gt->vm) {
                err = -ENOMEM;
                goto err_pm;
        }

        err = intel_engines_init(gt);
        if (err)
                goto err_engines;

        err = intel_uc_init(&gt->uc);
        if (err)
                goto err_engines;

        err = intel_gt_resume(gt);
        if (err)
                goto err_uc_init;

        err = __engines_record_defaults(gt);
        if (err)
                goto err_gt;

        err = __engines_verify_workarounds(gt);
        if (err)
                goto err_gt;

        err = i915_inject_probe_error(gt->i915, -EIO);
        if (err)
                goto err_gt;

        goto out_fw;
err_gt:
        __intel_gt_disable(gt);
        intel_uc_fini_hw(&gt->uc);
err_uc_init:
        intel_uc_fini(&gt->uc);
err_engines:
        intel_engines_release(gt);
        i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
        intel_gt_pm_fini(gt);
        intel_gt_fini_scratch(gt);
out_fw:
        if (err)
                intel_gt_set_wedged_on_init(gt);
        intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
        return err;
}

void intel_gt_driver_remove(struct intel_gt *gt)
{
        __intel_gt_disable(gt);

        intel_uc_driver_remove(&gt->uc);

        intel_engines_release(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
        intel_rps_driver_unregister(&gt->rps);

        /*
         * Upon unregistering the device to prevent any new users, cancel
         * all in-flight requests so that we can quickly unbind the active
         * resources.
         */
        intel_gt_set_wedged(gt);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
        struct i915_address_space *vm;
        intel_wakeref_t wakeref;

        /* Scrub all HW state upon release */
        with_intel_runtime_pm(gt->uncore->rpm, wakeref)
                __intel_gt_reset(gt, ALL_ENGINES);

        vm = fetch_and_zero(&gt->vm);
        if (vm) /* FIXME being called twice on error paths :( */
                i915_vm_put(vm);

        intel_gt_pm_fini(gt);
        intel_gt_fini_scratch(gt);
        intel_gt_fini_buffer_pool(gt);
}

void intel_gt_driver_late_release(struct intel_gt *gt)
{
        /* We need to wait for inflight RCU frees to release their grip */
        rcu_barrier();

        intel_uc_driver_late_release(&gt->uc);
        intel_gt_fini_requests(gt);
        intel_gt_fini_reset(gt);
        intel_gt_fini_timelines(gt);
        intel_engines_free(gt);
}