linux_dsm_epyc7002/drivers/gpu/drm/i915/intel_engine_cs.c


/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <drm/drm_print.h>
#include "i915_drv.h"
#include "i915_reset.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
* size is 70720 bytes, however, the power context and execlist context will
* never be saved (power context is stored elsewhere, and execlists don't work
* on HSW) - so the final size, including the extra state required for the
* Resource Streamer, is 66944 bytes, which rounds to 17 pages.
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
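/* 66944 B / 4096 B per page = 16.34, rounded up to 17 (assuming 4 KiB pages). */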
#define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE (18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
struct engine_class_info {
const char *name;
int (*init_legacy)(struct intel_engine_cs *engine);
int (*init_execlists)(struct intel_engine_cs *engine);
u8 uabi_class;
};
static const struct engine_class_info intel_engine_classes[] = {
[RENDER_CLASS] = {
.name = "rcs",
.init_execlists = logical_render_ring_init,
.init_legacy = intel_init_render_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_RENDER,
},
[COPY_ENGINE_CLASS] = {
.name = "bcs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_blt_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_COPY,
},
[VIDEO_DECODE_CLASS] = {
.name = "vcs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_VIDEO,
},
[VIDEO_ENHANCEMENT_CLASS] = {
.name = "vecs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_vebox_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
},
};
#define MAX_MMIO_BASES 3
struct engine_info {
unsigned int hw_id;
u8 class;
u8 instance;
/* mmio bases table *must* be sorted in reverse gen order */
struct engine_mmio_base {
u32 gen : 8;
u32 base : 24;
} mmio_bases[MAX_MMIO_BASES];
};
static const struct engine_info intel_engines[] = {
[RCS0] = {
.hw_id = RCS0_HW,
.class = RENDER_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 1, .base = RENDER_RING_BASE }
},
},
[BCS0] = {
.hw_id = BCS0_HW,
.class = COPY_ENGINE_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 6, .base = BLT_RING_BASE }
},
},
[VCS0] = {
.hw_id = VCS0_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD_RING_BASE },
{ .gen = 6, .base = GEN6_BSD_RING_BASE },
{ .gen = 4, .base = BSD_RING_BASE }
},
},
[VCS1] = {
.hw_id = VCS1_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 1,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
},
},
[VCS2] = {
.hw_id = VCS2_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 2,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
},
},
[VCS3] = {
.hw_id = VCS3_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 3,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
},
},
[VECS0] = {
.hw_id = VECS0_HW,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
{ .gen = 7, .base = VEBOX_RING_BASE }
},
},
[VECS1] = {
.hw_id = VECS1_HW,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 1,
.mmio_bases = {
{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
},
},
};
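/*
 * Example of the reverse-gen ordering required above: VCS0 lists gen 11,
 * gen 6 and gen 4 bases, so __engine_mmio_base() selects
 * GEN11_BSD_RING_BASE on gen11+, GEN6_BSD_RING_BASE on gen6-10 and
 * BSD_RING_BASE on gen4-5.
 */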
/**
* __intel_engine_context_size() - return the size of the context for an engine
* @dev_priv: i915 device private
* @class: engine class
*
* Each engine class may require a different amount of space for a context
* image.
*
* Return: size (in bytes) of an engine class specific context image
*
* Note: this size includes the HWSP, which is part of the context image
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
*/
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
u32 cxt_size;
BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
switch (class) {
case RENDER_CLASS:
switch (INTEL_GEN(dev_priv)) {
default:
MISSING_CASE(INTEL_GEN(dev_priv));
return DEFAULT_LR_CONTEXT_RENDER_SIZE;
case 11:
return GEN11_LR_CONTEXT_RENDER_SIZE;
case 10:
return GEN10_LR_CONTEXT_RENDER_SIZE;
case 9:
return GEN9_LR_CONTEXT_RENDER_SIZE;
case 8:
return GEN8_LR_CONTEXT_RENDER_SIZE;
case 7:
if (IS_HASWELL(dev_priv))
return HSW_CXT_TOTAL_SIZE;
cxt_size = I915_READ(GEN7_CXT_SIZE);
return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
PAGE_SIZE);
case 6:
cxt_size = I915_READ(CXT_SIZE);
return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
PAGE_SIZE);
case 5:
case 4:
case 3:
case 2:
/* For the special day when i810 gets merged. */
case 1:
return 0;
}
break;
default:
MISSING_CASE(class);
/* fall through */
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
if (INTEL_GEN(dev_priv) < 8)
return 0;
return GEN8_LR_CONTEXT_OTHER_SIZE;
}
}
static u32 __engine_mmio_base(struct drm_i915_private *i915,
const struct engine_mmio_base *bases)
{
int i;
for (i = 0; i < MAX_MMIO_BASES; i++)
if (INTEL_GEN(i915) >= bases[i].gen)
break;
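	/*
	 * Unused table slots are zero-initialized, so a zero gen matches any
	 * device; landing on one (base == 0), or walking off the end of the
	 * table, means no valid base exists for this gen. The asserts below
	 * catch both.
	 */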
GEM_BUG_ON(i == MAX_MMIO_BASES);
GEM_BUG_ON(!bases[i].base);
return bases[i].base;
}
static void __sprint_engine_name(char *name, const struct engine_info *info)
{
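	/* Produces names such as "rcs0" or "vcs1" (class name + instance). */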
WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
intel_engine_classes[info->class].name,
info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
/*
* Though they added more rings on g4x/ilk, they did not add
* per-engine HWSTAM until gen6.
*/
if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
return;
if (INTEL_GEN(engine->i915) >= 3)
ENGINE_WRITE(engine, RING_HWSTAM, mask);
else
ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}
static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
/* Mask off all writes into the unknown HWSP */
intel_engine_set_hwsp_writemask(engine, ~0u);
}
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
struct intel_engine_cs *engine;
GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
return -EINVAL;
if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
return -EINVAL;
GEM_BUG_ON(dev_priv->engine[id]);
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
return -ENOMEM;
BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
engine->id = id;
engine->mask = BIT(id);
engine->i915 = dev_priv;
engine->uncore = &dev_priv->uncore;
__sprint_engine_name(engine->name, info);
engine->hw_id = engine->guc_id = info->hw_id;
engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
engine->uabi_class = intel_engine_classes[info->class].uabi_class;
engine->context_size = __intel_engine_context_size(dev_priv,
engine->class);
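	/* BIT(20) == 1 MiB; a context image bigger than that is surely bogus. */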
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
seqlock_init(&engine->stats.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
/* Scrub mmio state on takeover */
intel_engine_sanitize_mmio(engine);
dev_priv->engine_class[info->class][info->instance] = engine;
dev_priv->engine[id] = engine;
return 0;
}
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
* @dev_priv: i915 device private
*
* Return: non-zero if the initialization failed.
*/
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int mask = 0;
unsigned int i;
int err;
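	/*
	 * engine_mask must be non-empty and must not set bits at or above
	 * I915_NUM_ENGINES.
	 */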
WARN_ON(engine_mask == 0);
WARN_ON(engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
if (i915_inject_load_failure())
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
if (!HAS_ENGINE(dev_priv, i))
continue;
err = intel_engine_setup(dev_priv, i);
if (err)
goto cleanup;
mask |= BIT(i);
}
/*
* Catch failures to update intel_engines table when the new engines
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
if (WARN_ON(mask != engine_mask))
device_info->engine_mask = mask;
/* We always presume we have at least RCS available for later probing */
if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) {
err = -ENODEV;
goto cleanup;
}
RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask);
i915_check_and_clear_faults(dev_priv);
return 0;
cleanup:
for_each_engine(engine, dev_priv, id)
kfree(engine);
return err;
}
/**
* intel_engines_init() - init the Engine Command Streamers
* @dev_priv: i915 device private
*
* Return: non-zero if the initialization failed.
*/
int intel_engines_init(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id, err_id;
int err;
for_each_engine(engine, dev_priv, id) {
const struct engine_class_info *class_info =
&intel_engine_classes[engine->class];
int (*init)(struct intel_engine_cs *engine);
if (HAS_EXECLISTS(dev_priv))
init = class_info->init_execlists;
else
init = class_info->init_legacy;
err = -EINVAL;
err_id = id;
if (GEM_DEBUG_WARN_ON(!init))
goto cleanup;
err = init(engine);
if (err)
goto cleanup;
GEM_BUG_ON(!engine->submit_request);
}
return 0;
cleanup:
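	/*
	 * Engines with id < err_id completed init and need the full cleanup
	 * hook; the engine that failed and any later ones were only
	 * allocated, so freeing them is sufficient.
	 */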
for_each_engine(engine, dev_priv, id) {
if (id >= err_id) {
kfree(engine);
dev_priv->engine[id] = NULL;
} else {
dev_priv->gt.cleanup_engine(engine);
}
}
return err;
}
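/*
 * Illustrative note (not driver code): in the unwind above, err_id is
 * the id of the engine whose init failed. Engines initialised before
 * it need the full backend teardown, while the failed engine and any
 * not-yet-touched ones were only allocated. For a hypothetical failure
 * at VCS0:
 *
 *	rcs0, bcs0   -> dev_priv->gt.cleanup_engine(engine)
 *	vcs0 onwards -> kfree(engine); dev_priv->engine[id] = NULL;
 */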
static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
i915_gem_batch_pool_init(&engine->batch_pool, engine);
}
static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
execlists->port_mask = 1;
GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
}
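/*
 * Illustrative note: execlists_num_ports() is assumed to report
 * port_mask + 1, so the default programmed above describes a two-port
 * submission setup:
 *
 *	execlists->port_mask = 1;
 *	execlists_num_ports(execlists) == 2, a power of two
 *
 * which is exactly what the GEM_BUG_ON() assertions check, with
 * EXECLIST_MAX_PORTS as the upper bound.
 */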
static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct i915_vma *vma;
/* Prevent writes into HWSP after returning the page to the system */
intel_engine_set_hwsp_writemask(engine, ~0u);
vma = fetch_and_zero(&engine->status_page.vma);
if (!vma)
return;
if (!HWS_NEEDS_PHYSICAL(engine->i915))
i915_vma_unpin(vma);
i915_gem_object_unpin_map(vma->obj);
__i915_gem_object_release_unless_active(vma->obj);
}
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
struct i915_vma *vma)
{
unsigned int flags;
flags = PIN_GLOBAL;
if (!HAS_LLC(engine->i915))
/*
* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
* gen4, gen5, or byt, they also behave similarly
* and hang if the HWS is placed at the top of the
* GTT. To generalise, it appears that all !llc
* platforms have issues with us placing the HWS
* above the mappable region (even though we never
* actually map it).
*/
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
return i915_vma_pin(vma, 0, 0, flags);
}
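/*
 * Flag selection sketch (values as computed above):
 *
 *	!HAS_LLC: PIN_GLOBAL | PIN_MAPPABLE, keep the HWS in the low,
 *		  mappable GGTT arena
 *	 HAS_LLC: PIN_GLOBAL | PIN_HIGH, keep it out of the mappable
 *		  aperture instead
 */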
static int init_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
void *vaddr;
int ret;
/*
* Though the HWS register does support 36bit addresses, historically
* we have had hangs and corruption reported due to wild writes if
* the HWS is placed above 4G. We only allow objects to be allocated
* in GFP_DMA32 for i965, and no earlier physical address users had
* access to more than 4G.
*/
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
}
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
}
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto err;
}
engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
engine->status_page.vma = vma;
if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
ret = pin_ggtt_status_page(engine, vma);
if (ret)
goto err_unpin;
}
return 0;
err_unpin:
i915_gem_object_unpin_map(obj);
err:
i915_gem_object_put(obj);
return ret;
}
/**
* intel_engine_setup_common - setup engine state not requiring hw access
* @engine: Engine to setup.
*
* Initializes @engine structure members shared between legacy and execlists
* submission modes which do not require hardware access.
*
* Typically done early in the submission mode specific engine setup stage.
*/
int intel_engine_setup_common(struct intel_engine_cs *engine)
{
int err;
err = init_status_page(engine);
if (err)
return err;
err = i915_timeline_init(engine->i915,
&engine->timeline,
engine->status_page.vma);
if (err)
goto err_hwsp;
i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlist(engine);
intel_engine_init_hangcheck(engine);
intel_engine_init_batch_pool(engine);
intel_engine_init_cmd_parser(engine);
return 0;
err_hwsp:
cleanup_status_page(engine);
return err;
}
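/*
 * Usage sketch (hypothetical backend code, error handling elided): the
 * common setup runs before any submission mode specific setup and is
 * paired with intel_engine_cleanup_common() on teardown:
 *
 *	err = intel_engine_setup_common(engine);
 *	if (err)
 *		return err;
 *	... legacy or execlists specific setup ...
 *	err = intel_engine_init_common(engine);
 */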
void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
{
static const struct {
u8 engine;
u8 sched;
} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
MAP(PREEMPTION, PREEMPTION),
MAP(SEMAPHORES, SEMAPHORES),
#undef MAP
};
struct intel_engine_cs *engine;
enum intel_engine_id id;
u32 enabled, disabled;
enabled = 0;
disabled = 0;
for_each_engine(engine, i915, id) { /* all engines must agree! */
int i;
if (engine->schedule)
enabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
else
disabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
for (i = 0; i < ARRAY_SIZE(map); i++) {
if (engine->flags & BIT(map[i].engine))
enabled |= BIT(map[i].sched);
else
disabled |= BIT(map[i].sched);
}
}
i915->caps.scheduler = enabled & ~disabled;
if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
i915->caps.scheduler = 0;
}
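/*
 * Worked example (hypothetical engine mix): if rcs0 supports
 * preemption but vcs0 does not, the bit lands in both masks and the
 * intersection clears it:
 *
 *	enabled  = ENABLED | PRIORITY | PREEMPTION;	(from rcs0)
 *	disabled = PREEMPTION;				(from vcs0)
 *	enabled & ~disabled == ENABLED | PRIORITY
 *
 * so a scheduler capability is advertised to userspace only when every
 * engine agrees on it.
 */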
struct measure_breadcrumb {
struct i915_request rq;
struct i915_timeline timeline;
struct intel_ring ring;
u32 cs[1024];
};
static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
{
struct measure_breadcrumb *frame;
int dw = -ENOMEM;
GEM_BUG_ON(!engine->i915->gt.scratch);
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
if (!frame)
return -ENOMEM;
if (i915_timeline_init(engine->i915,
&frame->timeline,
engine->status_page.vma))
goto out_frame;
INIT_LIST_HEAD(&frame->ring.request_list);
frame->ring.timeline = &frame->timeline;
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
intel_ring_update_space(&frame->ring);
frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.ring = &frame->ring;
frame->rq.timeline = &frame->timeline;
dw = i915_timeline_pin(&frame->timeline);
if (dw < 0)
goto out_timeline;
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
i915_timeline_unpin(&frame->timeline);
out_timeline:
i915_timeline_fini(&frame->timeline);
out_frame:
kfree(frame);
return dw;
}
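/*
 * Illustrative note: emit_fini_breadcrumb() returns the ring cursor
 * advanced past the emitted commands, so the dword count falls out of
 * u32 pointer arithmetic. If, say, six dwords were written:
 *
 *	cs = engine->emit_fini_breadcrumb(&frame->rq, frame->cs);
 *	dw = cs - frame->cs;	(6, in dwords, not bytes)
 *
 * The result is later stored as engine->emit_fini_breadcrumb_dw.
 */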
static int pin_context(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_context **out)
{
struct intel_context *ce;
ce = intel_context_pin(ctx, engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
*out = ce;
return 0;
}
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
*
* Initializes @engine structure members shared between legacy and execlists
* submission modes which do require hardware access.
*
* Typically done at later stages of submission mode specific engine setup.
*
* Returns zero on success or an error code on failure.
*/
int intel_engine_init_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
int ret;
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
* default context also requires GTT space which may not
* be available. To avoid this we always pin the default
* context.
*/
ret = pin_context(i915->kernel_context, engine,
&engine->kernel_context);
if (ret)
return ret;
/*
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time. However, as preemption
* is optional, we allow it to fail.
*/
if (i915->preempt_context)
pin_context(i915->preempt_context, engine,
&engine->preempt_context);
ret = measure_breadcrumb_dw(engine);
if (ret < 0)
goto err_unpin;
engine->emit_fini_breadcrumb_dw = ret;
engine->set_default_submission(engine);
return 0;
err_unpin:
if (engine->preempt_context)
intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
return ret;
}
/**
* intel_engine_cleanup_common - cleans up the engine state created by
* the common initializers.
* @engine: Engine to cleanup.
*
* This cleans up everything created by the common helpers.
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
if (engine->default_state)
i915_gem_object_put(engine->default_state);
if (engine->preempt_context)
intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
i915_timeline_fini(&engine->timeline);
intel_wa_list_free(&engine->ctx_wa_list);
intel_wa_list_free(&engine->wa_list);
intel_wa_list_free(&engine->whitelist);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
u64 acthd;
if (INTEL_GEN(i915) >= 8)
acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
else if (INTEL_GEN(i915) >= 4)
acthd = ENGINE_READ(engine, RING_ACTHD);
else
acthd = ENGINE_READ(engine, ACTHD);
return acthd;
}
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
u64 bbaddr;
if (INTEL_GEN(engine->i915) >= 8)
bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
else
bbaddr = ENGINE_READ(engine, RING_BBADDR);
return bbaddr;
}
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
const i915_reg_t mode = RING_MI_MODE(base);
int err;
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
GEM_TRACE("%s\n", engine->name);
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
err = 0;
if (__intel_wait_for_register_fw(uncore,
mode, MODE_IDLE, MODE_IDLE,
1000, 0,
NULL)) {
GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
err = -ETIMEDOUT;
}
/* A final mmio read to hopefully flush any pending GPU writes to memory */
intel_uncore_posting_read_fw(uncore, mode);
return err;
}
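/*
 * Illustrative note on the masked write above, assuming the usual i915
 * masked register encoding where the upper 16 bits select which bits
 * the hardware actually updates:
 *
 *	_MASKED_BIT_ENABLE(STOP_RING)  == (STOP_RING << 16) | STOP_RING
 *	_MASKED_BIT_DISABLE(STOP_RING) == (STOP_RING << 16)
 *
 * This lets intel_engine_cancel_stop_cs() below clear STOP_RING
 * without touching any other RING_MI_MODE bits.
 */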
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
GEM_TRACE("%s\n", engine->name);
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
switch (type) {
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
case I915_CACHE_L3_LLC: return " L3+LLC";
case I915_CACHE_WT: return " WT";
default: return "";
}
}
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 mcr_s_ss_select;
u32 slice = fls(sseu->slice_mask);
u32 subslice = fls(sseu->subslice_mask[slice]);
if (IS_GEN(dev_priv, 10))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
GEN8_MCR_SUBSLICE(subslice);
else if (INTEL_GEN(dev_priv) >= 11)
mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
GEN11_MCR_SUBSLICE(subslice);
else
mcr_s_ss_select = 0;
return mcr_s_ss_select;
}
static inline u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u32 mcr_slice_subslice_mask;
u32 mcr_slice_subslice_select;
u32 default_mcr_s_ss_select;
u32 mcr;
u32 ret;
enum forcewake_domains fw_domains;
if (INTEL_GEN(dev_priv) >= 11) {
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
GEN11_MCR_SUBSLICE(subslice);
} else {
mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
GEN8_MCR_SUBSLICE_MASK;
mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
GEN8_MCR_SUBSLICE(subslice);
}
default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
spin_lock_irq(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
default_mcr_s_ss_select);
mcr &= ~mcr_slice_subslice_mask;
mcr |= mcr_slice_subslice_select;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
ret = intel_uncore_read_fw(uncore, reg);
mcr &= ~mcr_slice_subslice_mask;
mcr |= default_mcr_s_ss_select;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irq(&uncore->lock);
return ret;
}
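/*
 * Usage sketch: steering a unicast read at a specific slice/subslice,
 * as done for the INSTDONE registers below:
 *
 *	val = read_subslice_reg(dev_priv, slice, subslice,
 *				GEN7_SAMPLER_INSTDONE);
 *
 * The MCR selector is restored to the default before the lock is
 * dropped so that later reads are not silently redirected.
 */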
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
int subslice;
memset(instdone, 0, sizeof(*instdone));
switch (INTEL_GEN(dev_priv)) {
default:
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id != RCS0)
break;
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(dev_priv, slice, subslice,
GEN7_SAMPLER_INSTDONE);
instdone->row[slice][subslice] =
read_subslice_reg(dev_priv, slice, subslice,
GEN7_ROW_INSTDONE);
}
break;
case 7:
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id != RCS0)
break;
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
instdone->sampler[0][0] =
intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
instdone->row[0][0] =
intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
break;
case 6:
case 5:
case 4:
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id == RCS0)
/* HACK: Using the wrong struct member */
instdone->slice_common =
intel_uncore_read(uncore, GEN4_INSTDONE1);
break;
case 3:
case 2:
instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
break;
}
}
static bool ring_is_idle(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
intel_wakeref_t wakeref;
bool idle = true;
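/* Mock engines used by the selftests have no mmio base to probe. */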
if (I915_SELFTEST_ONLY(!engine->mmio_base))
return true;
/* If the whole device is asleep, the engine must be idle */
wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref)
return true;
/* First check that no commands are left in the ring */
if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
(ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
if (INTEL_GEN(dev_priv) > 2 &&
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
intel_runtime_pm_put(dev_priv, wakeref);
return idle;
}
/**
* intel_engine_is_idle() - Report if the engine has finished processing all work
* @engine: the intel_engine_cs
*
* Return true if there are no requests pending, nothing left to be submitted
* to hardware, and the engine is idle.
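*
* This may be called without holding the GT wakeref: ring_is_idle() takes
* its own runtime pm reference before probing the ring registers.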
*/
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
/* More white lies: if wedged, the HW state is inconsistent */
if (i915_reset_failed(engine->i915))
return true;
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
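/*
 * Kick the tasklet directly, with bottom halves disabled: the
 * submission backends expect to be run in (at least) softirq
 * context, not plain process context.
 */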
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
if (__tasklet_is_enabled(t))
t->func(t->data);
tasklet_unlock(t);
}
local_bh_enable();
/* Otherwise flush the tasklet if it was on another cpu */
tasklet_unlock_wait(t);
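/* Recheck after the flush; the tasklet run may have drained ELSP. */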
if (READ_ONCE(engine->execlists.active))
return false;
}
/* ELSP is empty, but there are ready requests? E.g. after reset */
if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
return false;
/* Ring stopped? */
return ring_is_idle(engine);
}
bool intel_engines_are_idle(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
if (i915_reset_failed(i915))
return true;
/* Already parked (and passed an idleness test); must still be idle */
if (!READ_ONCE(i915->gt.awake))
return true;
for_each_engine(engine, i915, id) {
if (!intel_engine_is_idle(engine))
return false;
}
return true;
}
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id)
engine->set_default_submission(engine);
}
static bool reset_engines(struct drm_i915_private *i915)
{
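/* Avoid a full reset where it would also clobber the display state. */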
if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
return false;
return intel_gpu_reset(i915, ALL_ENGINES) == 0;
}
/**
* intel_engines_sanitize: called after the GPU has lost power
* @i915: the i915 device
* @force: ignore a failed reset and sanitize engine state anyway
*
* Anytime we reset the GPU, either with an explicit GPU reset or through a
* PCI power cycle, the GPU loses state and we must reset our state tracking
* to match. Note that calling intel_engines_sanitize() if the GPU has not
* been reset results in much confusion!
*/
void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("\n");
if (!reset_engines(i915) && !force)
return;
for_each_engine(engine, i915, id)
intel_engine_reset(engine, false);
}
/**
* intel_engines_park: called when the GT is transitioning from busy->idle
* @i915: the i915 device
*
* The GT is now idle and about to go to sleep (maybe never to wake again?).
* Time for us to tidy and put away our toys (release resources back to the
* system).
*/
void intel_engines_park(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
/* Flush the residual irq tasklets first. */
intel_engine_disarm_breadcrumbs(engine);
tasklet_kill(&engine->execlists.tasklet);
/*
* We are committed now to parking the engines; make sure there
* will be no more interrupts arriving later and the engines
* are truly idle.
*/
if (wait_for(intel_engine_is_idle(engine), 10)) {
struct drm_printer p = drm_debug_printer(__func__);
dev_err(i915->drm.dev,
"%s is not idle before parking\n",
engine->name);
intel_engine_dump(engine, &p, NULL);
}
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
if (engine->park)
engine->park(engine);
if (engine->pinned_default_state) {
i915_gem_object_unpin_map(engine->default_state);
engine->pinned_default_state = NULL;
}
i915_gem_batch_pool_fini(&engine->batch_pool);
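/* Now idle, forgive any earlier priolist allocation failure. */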
engine->execlists.no_priolist = false;
}
i915->gt.active_engines = 0;
}
/**
* intel_engines_unpark: called when the GT is transitioning from idle->busy
* @i915: the i915 device
*
* The GT was idle and is now about to fire up with some new user requests.
*/
void intel_engines_unpark(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
void *map;
/* Pin the default state for fast resets from atomic context. */
map = NULL;
if (engine->default_state)
map = i915_gem_object_pin_map(engine->default_state,
I915_MAP_WB);
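/* A failed pin is not fatal; we simply lose the fast-reset path. */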
if (!IS_ERR_OR_NULL(map))
engine->pinned_default_state = map;
if (engine->unpark)
engine->unpark(engine);
intel_engine_init_hangcheck(engine);
}
}
/**
* intel_engine_lost_context: called when the GPU is reset into unknown state
* @engine: the engine
*
* We have either reset the GPU, or we are otherwise about to lose state tracking of
* the current GPU logical state (e.g. suspend). On next use, it is therefore
* imperative that we make no presumptions about the current state and load
* from scratch.
*/
void intel_engine_lost_context(struct intel_engine_cs *engine)
{
struct intel_context *ce;
lockdep_assert_held(&engine->i915->drm.struct_mutex);
ce = fetch_and_zero(&engine->last_retired_context);
if (ce)
intel_context_unpin(ce);
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
case 2:
return false; /* uses physical not virtual addresses */
case 3:
/* maybe only uses physical not virtual addresses */
return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
case 6:
return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
default:
return true;
}
}
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int which;
which = 0;
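/* Report the uabi classes for which a default context state was saved. */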
for_each_engine(engine, i915, id)
if (engine->default_state)
which |= BIT(engine->uabi_class);
return which;
}
static int print_sched_attr(struct drm_i915_private *i915,
const struct i915_sched_attr *attr,
char *buf, int x, int len)
{
if (attr->priority == I915_PRIORITY_INVALID)
return x;
x += snprintf(buf + x, len - x,
" prio=%d", attr->priority);
return x;
}
static void print_request(struct drm_printer *m,
struct i915_request *rq,
const char *prefix)
{
const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
char buf[80] = "";
int x = 0;
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
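
	/*
	 * One-character state markers: "!" means the request has
	 * already completed, "*" that it has started on the HW, and
	 * "+" that signaling has been enabled on its fence.
	 */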
drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
prefix,
rq->fence.context, rq->fence.seqno,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&rq->fence.flags) ? "+" : "",
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
name);
}
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
const size_t rowsize = 8 * sizeof(u32);
const void *prev = NULL;
bool skip = false;
size_t pos;
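
	/*
	 * As with hexdump(1), collapse runs of identical rows into a
	 * single "*" line so a mostly-zero page does not flood the log.
	 */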
for (pos = 0; pos < len; pos += rowsize) {
char line[128];
if (prev && !memcmp(prev, buf + pos, rowsize)) {
if (!skip) {
drm_printf(m, "*\n");
skip = true;
}
continue;
}
WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
rowsize, sizeof(u32),
line, sizeof(line),
false) >= sizeof(line));
drm_printf(m, "[%04zx] %s\n", pos, line);
prev = buf + pos;
skip = false;
}
}

static void intel_engine_print_registers(const struct intel_engine_cs *engine,
struct drm_printer *m)
{
struct drm_i915_private *dev_priv = engine->i915;
const struct intel_engine_execlists * const execlists =
&engine->execlists;
u64 addr;
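
	/*
	 * Every ENGINE_READ() below is a live MMIO access; our only
	 * caller, intel_engine_dump(), invokes us while holding a
	 * runtime-pm wakeref, so the device is awake.
	 */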
if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
ENGINE_READ(engine, RING_START));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
drm_printf(m, "\tRING_TAIL: 0x%08x\n",
ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
if (INTEL_GEN(engine->i915) > 2) {
drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
ENGINE_READ(engine, RING_MI_MODE),
ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
if (INTEL_GEN(dev_priv) >= 6) {
drm_printf(m, "\tRING_IMR: %08x\n",
ENGINE_READ(engine, RING_IMR));
}
addr = intel_engine_get_active_head(engine);
drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
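
	/*
	 * The DMA fetch address register moved between gens and gained
	 * an upper dword on gen8+.
	 */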
if (INTEL_GEN(dev_priv) >= 8)
addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
else if (INTEL_GEN(dev_priv) >= 4)
addr = ENGINE_READ(engine, RING_DMA_FADD);
else
addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (INTEL_GEN(dev_priv) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
ENGINE_READ(engine, RING_IPEHR));
} else {
drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
if (HAS_EXECLISTS(dev_priv)) {
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
unsigned int idx;
u8 read, write;
drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
read, write,
GEN8_CSB_WRITE_PTR(ENGINE_READ(engine, RING_CONTEXT_STATUS_PTR)),
yesno(test_bit(TASKLET_STATE_SCHED,
&engine->execlists.tasklet.state)),
enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
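
		/*
		 * Clamp the CSB pointers into range and unwrap the write
		 * pointer so the loop below walks each pending event
		 * exactly once, oldest first.
		 */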
if (read >= GEN8_CSB_ENTRIES)
read = 0;
if (write >= GEN8_CSB_ENTRIES)
write = 0;
if (read > write)
write += GEN8_CSB_ENTRIES;
while (read < write) {
idx = ++read % GEN8_CSB_ENTRIES;
drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
idx,
hws[idx * 2],
ENGINE_READ_IDX(engine,
RING_CONTEXT_STATUS_BUF_LO,
idx),
hws[idx * 2 + 1],
ENGINE_READ_IDX(engine,
RING_CONTEXT_STATUS_BUF_HI,
idx));
}
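
		/*
		 * Peek at the ELSP ports locklessly: a request may be
		 * retired under us, so the RCU read lock guards each
		 * dereference of a port's request.
		 */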
rcu_read_lock();
for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
struct i915_request *rq;
unsigned int count;
rq = port_unpack(&execlists->port[idx], &count);
if (rq) {
char hdr[80];
snprintf(hdr, sizeof(hdr),
"\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
idx, count,
i915_ggtt_offset(rq->ring->vma),
rq->timeline->hwsp_offset,
hwsp_seqno(rq));
print_request(m, rq, hdr);
} else {
drm_printf(m, "\t\tELSP[%d] idle\n", idx);
}
}
drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
rcu_read_unlock();
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_DCLV));
}
}

static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
void *ring;
int size;
drm_printf(m,
"[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
rq->head, rq->postfix, rq->tail,
rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
size = rq->tail - rq->head;
if (rq->tail < rq->head)
size += rq->ring->size;
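
	/*
	 * Snapshot the request's payload out of the ringbuffer, stitching
	 * the two halves back together if it wraps past the end of the
	 * ring, so the hexdump below sees one contiguous copy.
	 */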
ring = kmalloc(size, GFP_ATOMIC);
if (ring) {
const void *vaddr = rq->ring->vaddr;
unsigned int head = rq->head;
unsigned int len = 0;
if (rq->tail < head) {
len = rq->ring->size - head;
memcpy(ring, vaddr + head, len);
head = 0;
}
memcpy(ring + len, vaddr + head, size - len);
hexdump(m, ring, size);
kfree(ring);
}
}

void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
{
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq;
intel_wakeref_t wakeref;
if (header) {
va_list ap;
va_start(ap, header);
drm_vprintf(m, header, &ap);
va_end(ap);
}
if (i915_reset_failed(engine->i915))
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
engine->hangcheck.last_seqno,
engine->hangcheck.next_seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
i915_reset_count(error));
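
	/*
	 * Requests are inspected locklessly: they, their contexts and
	 * timelines may be freed at any instant, so keep the RCU read
	 * lock held across every dereference below.
	 */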
rcu_read_lock();
drm_printf(m, "\tRequests:\n");
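
	/*
	 * list_first_entry()/list_last_entry() do not check for an
	 * empty list; comparing the entry's link against the list head
	 * is how we detect that there are no requests at all.
	 */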
rq = list_first_entry(&engine->timeline.requests,
struct i915_request, link);
if (&rq->link != &engine->timeline.requests)
print_request(m, rq, "\t\tfirst ");
rq = list_last_entry(&engine->timeline.requests,
struct i915_request, link);
if (&rq->link != &engine->timeline.requests)
print_request(m, rq, "\t\tlast ");
rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
drm_printf(m, "\t\tring->start: 0x%08x\n",
i915_ggtt_offset(rq->ring->vma));
drm_printf(m, "\t\tring->head: 0x%08x\n",
rq->ring->head);
drm_printf(m, "\t\tring->tail: 0x%08x\n",
rq->ring->tail);
drm_printf(m, "\t\tring->emit: 0x%08x\n",
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
rq->timeline->hwsp_offset);
print_request_ring(m, rq);
}
rcu_read_unlock();
wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
if (wakeref) {
intel_engine_print_registers(engine, m);
intel_runtime_pm_put(engine->i915, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
intel_execlists_show_requests(engine, m, print_request, 8);
drm_printf(m, "HWSP:\n");
hexdump(m, engine->status_page.addr, PAGE_SIZE);
drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
intel_engine_print_breadcrumbs(engine, m);
}
static u8 user_class_map[] = {
[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};
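
/*
 * Map the stable uabi engine class exposed to userspace back onto the
 * driver's internal class id; out-of-range classes or instances yield
 * NULL rather than an error.
 */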
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
if (class >= ARRAY_SIZE(user_class_map))
return NULL;
class = user_class_map[class];
GEM_BUG_ON(class > MAX_ENGINE_CLASS);
if (instance > MAX_ENGINE_INSTANCE)
return NULL;
return i915->engine_class[class][instance];
}
/**
* intel_enable_engine_stats() - Enable engine busy tracking on engine
* @engine: engine to enable stats collection
*
* Start collecting the engine busyness data for @engine.
*
* Returns 0 on success or a negative error code.
*/
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
unsigned long flags;
int err = 0;
if (!intel_engine_supports_stats(engine))
return -ENODEV;
spin_lock_irqsave(&engine->timeline.lock, flags);
write_seqlock(&engine->stats.lock);
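	/* stats.enabled is a refcount of enablers; bail out rather than wrap. */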
if (unlikely(engine->stats.enabled == ~0)) {
err = -EBUSY;
goto unlock;
}
if (engine->stats.enabled++ == 0) {
const struct execlist_port *port = execlists->port;
unsigned int num_ports = execlists_num_ports(execlists);
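
		/*
		 * Busy stats are relative to the moment of enabling:
		 * sample any contexts already executing on the ELSP
		 * ports so their in-flight busy period is accounted
		 * from now onwards.
		 */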
engine->stats.enabled_at = ktime_get();
/* XXX submission method oblivious? */
while (num_ports-- && port_isset(port)) {
engine->stats.active++;
port++;
}
if (engine->stats.active)
engine->stats.start = engine->stats.enabled_at;
}
unlock:
write_sequnlock(&engine->stats.lock);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
return err;
}
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
ktime_t total = engine->stats.total;
	/*
	 * If the engine is currently executing something, add the time
	 * elapsed since it started to the accumulated total.
	 */
if (engine->stats.active)
total = ktime_add(total,
ktime_sub(ktime_get(), engine->stats.start));
return total;
}
/**
* intel_engine_get_busy_time() - Return current accumulated engine busyness
* @engine: engine to report on
*
* Returns accumulated time @engine was busy since engine stats were enabled.
*/
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
unsigned int seq;
ktime_t total;
do {
seq = read_seqbegin(&engine->stats.lock);
total = __intel_engine_get_busy_time(engine);
} while (read_seqretry(&engine->stats.lock, seq));
return total;
}
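/*
 * Illustrative sketch, not part of the driver: one way a caller could turn
 * two busy-time samples into a utilisation figure. It assumes engine stats
 * have already been enabled via intel_engine_stats_enable() counterparts
 * earlier in this file, and that <linux/delay.h> and <linux/math64.h> are
 * available. The helper name, sampling window and pr_info() reporting are
 * assumptions made for this example; only intel_engine_get_busy_time()
 * above is real. Kept under #if 0 so it is never compiled.
 */
#if 0
static void example_report_engine_utilisation(struct intel_engine_cs *engine)
{
	ktime_t busy0, busy1, t0, t1;
	u64 busy_ns, wall_ns;

	/* First sample of accumulated busy time and wall clock. */
	busy0 = intel_engine_get_busy_time(engine);
	t0 = ktime_get();

	msleep(1000); /* arbitrary 1s sampling window */

	/* Second sample; the deltas bound the activity in the window. */
	busy1 = intel_engine_get_busy_time(engine);
	t1 = ktime_get();

	busy_ns = ktime_to_ns(ktime_sub(busy1, busy0));
	wall_ns = ktime_to_ns(ktime_sub(t1, t0));

	pr_info("%s: ~%llu%% busy over the last second\n",
		engine->name, div64_u64(100 * busy_ns, wall_ns));
}
#endif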
/**
* intel_disable_engine_stats() - Disable engine busy tracking on engine
* @engine: engine to disable stats collection
*
* Stops collecting the engine busyness data for @engine.
*/
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
unsigned long flags;
if (!intel_engine_supports_stats(engine))
return;
write_seqlock_irqsave(&engine->stats.lock, flags);
WARN_ON_ONCE(engine->stats.enabled == 0);
if (--engine->stats.enabled == 0) {
engine->stats.total = __intel_engine_get_busy_time(engine);
engine->stats.active = 0;
}
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
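/*
 * Illustrative sketch, not part of the driver: the stats enable/disable
 * calls are reference counted, so a sampling client is expected to bracket
 * its use of intel_engine_get_busy_time() as below.
 * intel_enable_engine_stats() is defined earlier in this file; the helper
 * name and error handling shown here are assumptions for the example.
 */
#if 0
static int example_sample_engine_busyness(struct intel_engine_cs *engine,
					  ktime_t *busy)
{
	int err;

	err = intel_enable_engine_stats(engine);
	if (err) /* e.g. the backend does not support busy stats */
		return err;

	*busy = intel_engine_get_busy_time(engine);

	intel_disable_engine_stats(engine);
	return 0;
}
#endif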
/*
 * Check whether the ring programmed into the RING_START register is the
 * ringbuffer backing this request, i.e. whether the request's context is
 * the one currently loaded on the hardware.
 */
static bool match_ring(struct i915_request *rq)
{
	u32 ring = ENGINE_READ(rq->engine, RING_START);

	return ring == i915_ggtt_offset(rq->ring->vma);
}
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
unsigned long flags;
	/*
	 * We are called by error capture, reset and engine state dumping, at
	 * effectively random points in time; in particular, none of these
	 * callers is ordered with respect to interrupt delivery. After a
	 * hang, the GPU is dead and we assume that no more writes can happen
	 * (we waited long enough for all writes that were in flight to be
	 * flushed) - adding an extra delay for a recent interrupt is
	 * pointless. Hence, we do not need an engine->irq_seqno_barrier()
	 * before the seqno reads. At all other times, we must assume the GPU
	 * is still running, but we only care about the snapshot of this
	 * moment.
	 */
spin_lock_irqsave(&engine->timeline.lock, flags);
	list_for_each_entry(request, &engine->timeline.requests, link) {
		if (i915_request_completed(request))
			continue;

		/* Requests are in submission order; nothing later has started. */
		if (!i915_request_started(request))
			break;

		/*
		 * More than one preemptible request may match! Pick the
		 * request only if its ring matches what the hardware is
		 * currently executing.
		 */
		if (!match_ring(request))
			break;

		active = request;
		break;
	}
spin_unlock_irqrestore(&engine->timeline.lock, flags);
return active;
}
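/*
 * Illustrative sketch, not part of the driver: a caller such as engine
 * state dumping might use intel_engine_find_active_request() as below.
 * Note that the request is returned without an extra reference, so the
 * caller must guarantee it cannot be retired concurrently (error capture
 * does so by stopping the machine first). The helper name and the
 * pr_info() reporting are assumptions for the example.
 */
#if 0
static void example_dump_active_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = intel_engine_find_active_request(engine);
	if (!rq) {
		pr_info("%s: idle, no active request\n", engine->name);
		return;
	}

	pr_info("%s: active request on context %llu\n",
		engine->name, rq->fence.context);
}
#endif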
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
#endif