drm/i915: split out uncore_mmio_debug

Multiple uncore structures will share the debug infrastructure, so
move it to a common place and add extra locking around it.
Also, since we now have a separate object, it is cleaner to add dedicated
functions that operate on it to suspend and resume the mmio debug. Apart
from the cosmetic changes, this patch introduces two functional updates:

- All calls to check_for_unclaimed_mmio will now return false while the
  debug is suspended, not just the ones that are active only when
  i915_modparams.mmio_debug is set. If we don't trust the result of the
  check while a user is doing mmio access, then we shouldn't attempt the
  check anywhere (see the sketch after this list).

- i915_modparams.mmio_debug is no longer saved and restored around user
  access. The value is now never touched by the kernel while the debug is
  disabled, so there is no need for a save/restore.
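
As a quick illustration of the suspend/resume bookkeeping described above,
here is a small self-contained C model (plain user-space code written for
this note, not driver code; the spinlock, lockdep annotations and the
hardware checks are left out, and the field names mirror the new
intel_uncore_mmio_debug object):

#include <assert.h>
#include <stdio.h>

/* Mirrors the counters of the new intel_uncore_mmio_debug object. */
struct mmio_debug {
        int unclaimed_mmio_check;
        int saved_mmio_check;
        unsigned int suspend_count;
};

/* Only the first (outermost) suspend saves and clears the check counter. */
static void mmio_debug_suspend(struct mmio_debug *d)
{
        if (!d->suspend_count++) {
                d->saved_mmio_check = d->unclaimed_mmio_check;
                d->unclaimed_mmio_check = 0;
        }
}

/* Only the last (outermost) resume restores it. */
static void mmio_debug_resume(struct mmio_debug *d)
{
        if (!--d->suspend_count)
                d->unclaimed_mmio_check = d->saved_mmio_check;
}

int main(void)
{
        struct mmio_debug d = { .unclaimed_mmio_check = 1 };

        mmio_debug_suspend(&d);                 /* first user bypass */
        mmio_debug_suspend(&d);                 /* nested bypass */
        assert(d.unclaimed_mmio_check == 0);    /* checks are off */

        mmio_debug_resume(&d);
        assert(d.unclaimed_mmio_check == 0);    /* one bypass still active */

        mmio_debug_resume(&d);
        assert(d.unclaimed_mmio_check == 1);    /* restored */

        printf("mmio debug suspend/resume nesting behaves as expected\n");
        return 0;
}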

v2: squash mmio_debug patches, restrict mmio_debug lock usage (Chris)

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190809063116.7527-1-chris@chris-wilson.co.uk
Daniele Ceraolo Spurio 2019-08-09 07:31:16 +01:00 committed by Chris Wilson
parent 72e67f0463
commit 0a9b26306d
5 changed files with 79 additions and 36 deletions


@@ -1149,7 +1149,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
 	unsigned int tmp;
 
 	seq_printf(m, "user.bypass_count = %u\n",
-		   uncore->user_forcewake.count);
+		   uncore->user_forcewake_count);
 
 	for_each_fw_domain(fw_domain, uncore, tmp)
 		seq_printf(m, "%s.wake_count = %u\n",


@@ -485,6 +485,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
 	intel_device_info_subplatform_init(dev_priv);
 
+	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
 	intel_uncore_init_early(&dev_priv->uncore, dev_priv);
 
 	spin_lock_init(&dev_priv->irq_lock);
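
Side note on the ordering in the hunk above (an illustrative reading, not
text from the patch): the debug object is set up before
intel_uncore_init_early() makes uncore->debug point at it, so its lock
already exists by the time any uncore path can take it:

        /* Illustrative ordering, condensed from i915_driver_early_probe() above: */
        intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug); /* spin_lock_init() + unclaimed_mmio_check = 1 */
        intel_uncore_init_early(&dev_priv->uncore, dev_priv);      /* sets uncore->debug = &dev_priv->mmio_debug */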
@@ -1785,7 +1786,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 out:
 	enable_rpm_wakeref_asserts(rpm);
-	if (!dev_priv->uncore.user_forcewake.count)
+	if (!dev_priv->uncore.user_forcewake_count)
 		intel_runtime_pm_driver_release(rpm);
 
 	return ret;


@@ -1369,6 +1369,7 @@ struct drm_i915_private {
 	resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
 
 	struct intel_uncore uncore;
+	struct intel_uncore_mmio_debug mmio_debug;
 
 	struct i915_virtual_gpu vgpu;


@@ -34,6 +34,32 @@
 #define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
 
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
+{
+	spin_lock_init(&mmio_debug->lock);
+	mmio_debug->unclaimed_mmio_check = 1;
+}
+
+static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
+{
+	lockdep_assert_held(&mmio_debug->lock);
+
+	/* Save and disable mmio debugging for the user bypass */
+	if (!mmio_debug->suspend_count++) {
+		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
+		mmio_debug->unclaimed_mmio_check = 0;
+	}
+}
+
+static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
+{
+	lockdep_assert_held(&mmio_debug->lock);
+
+	if (!--mmio_debug->suspend_count)
+		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
+}
+
 static const char * const forcewake_domain_names[] = {
 	"render",
 	"blitter",
@@ -476,6 +502,11 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore)
 {
 	bool ret = false;
 
+	lockdep_assert_held(&uncore->debug->lock);
+
+	if (uncore->debug->suspend_count)
+		return false;
+
 	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
 		ret |= fpga_check_for_unclaimed_mmio(uncore);
@@ -608,17 +639,11 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
 {
 	spin_lock_irq(&uncore->lock);
-	if (!uncore->user_forcewake.count++) {
+	if (!uncore->user_forcewake_count++) {
 		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
-
-		/* Save and disable mmio debugging for the user bypass */
-		uncore->user_forcewake.saved_mmio_check =
-			uncore->unclaimed_mmio_check;
-		uncore->user_forcewake.saved_mmio_debug =
-			i915_modparams.mmio_debug;
-
-		uncore->unclaimed_mmio_check = 0;
-		i915_modparams.mmio_debug = 0;
+		spin_lock(&uncore->debug->lock);
+		mmio_debug_suspend(uncore->debug);
+		spin_unlock(&uncore->debug->lock);
 	}
 	spin_unlock_irq(&uncore->lock);
 }
@@ -633,15 +658,14 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
 {
 	spin_lock_irq(&uncore->lock);
-	if (!--uncore->user_forcewake.count) {
-		if (intel_uncore_unclaimed_mmio(uncore))
+	if (!--uncore->user_forcewake_count) {
+		spin_lock(&uncore->debug->lock);
+		mmio_debug_resume(uncore->debug);
+
+		if (check_for_unclaimed_mmio(uncore))
 			dev_info(uncore->i915->drm.dev,
 				 "Invalid mmio detected during user access\n");
+		spin_unlock(&uncore->debug->lock);
 
-		uncore->unclaimed_mmio_check =
-			uncore->user_forcewake.saved_mmio_check;
-		i915_modparams.mmio_debug =
-			uncore->user_forcewake.saved_mmio_debug;
-
 		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
 	}
@@ -1088,7 +1112,16 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
 	if (likely(!i915_modparams.mmio_debug))
 		return;
 
+	/* interrupts are disabled and re-enabled around uncore->lock usage */
+	lockdep_assert_held(&uncore->lock);
+
+	if (before)
+		spin_lock(&uncore->debug->lock);
+
 	__unclaimed_reg_debug(uncore, reg, read, before);
+
+	if (!before)
+		spin_unlock(&uncore->debug->lock);
 }
 
 #define GEN2_READ_HEADER(x) \
@@ -1607,6 +1640,7 @@ void intel_uncore_init_early(struct intel_uncore *uncore,
 	spin_lock_init(&uncore->lock);
 	uncore->i915 = i915;
 	uncore->rpm = &i915->runtime_pm;
+	uncore->debug = &i915->mmio_debug;
 }
 
 static void uncore_raw_init(struct intel_uncore *uncore)
@@ -1632,7 +1666,6 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
 	ret = intel_uncore_fw_domains_init(uncore);
 	if (ret)
 		return ret;
-
 	forcewake_early_sanitize(uncore, 0);
 
 	if (IS_GEN_RANGE(i915, 6, 7)) {
@@ -1681,8 +1714,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
 		uncore->flags |= UNCORE_HAS_FORCEWAKE;
 
-	uncore->unclaimed_mmio_check = 1;
-
 	if (!intel_uncore_has_forcewake(uncore)) {
 		uncore_raw_init(uncore);
 	} else {
@@ -1707,7 +1738,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 		uncore->flags |= UNCORE_HAS_FIFO;
 
 	/* clear out unclaimed reg detection bit */
-	if (check_for_unclaimed_mmio(uncore))
+	if (intel_uncore_unclaimed_mmio(uncore))
 		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
 	return 0;
@@ -1952,7 +1983,13 @@ int __intel_wait_for_register(struct intel_uncore *uncore,
 
 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
 {
-	return check_for_unclaimed_mmio(uncore);
+	bool ret;
+
+	spin_lock_irq(&uncore->debug->lock);
+	ret = check_for_unclaimed_mmio(uncore);
+	spin_unlock_irq(&uncore->debug->lock);
+
+	return ret;
 }
 
 bool
@@ -1960,24 +1997,24 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
 {
 	bool ret = false;
 
-	spin_lock_irq(&uncore->lock);
+	spin_lock_irq(&uncore->debug->lock);
 
-	if (unlikely(uncore->unclaimed_mmio_check <= 0))
+	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
 		goto out;
 
-	if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
+	if (unlikely(check_for_unclaimed_mmio(uncore))) {
 		if (!i915_modparams.mmio_debug) {
 			DRM_DEBUG("Unclaimed register detected, "
 				  "enabling oneshot unclaimed register reporting. "
 				  "Please use i915.mmio_debug=N for more information.\n");
 			i915_modparams.mmio_debug++;
 		}
-		uncore->unclaimed_mmio_check--;
+		uncore->debug->unclaimed_mmio_check--;
 		ret = true;
 	}
 
 out:
-	spin_unlock_irq(&uncore->lock);
+	spin_unlock_irq(&uncore->debug->lock);
 
 	return ret;
 }
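
One observation on the resulting lock nesting, read off the hunks above
rather than stated anywhere in the patch:

/*
 * Lock nesting after this patch (illustrative summary, not code from the
 * patch):
 *
 *   intel_uncore_forcewake_user_get()/_put():
 *       spin_lock_irq(&uncore->lock);          outer lock, irqs off
 *           spin_lock(&uncore->debug->lock);   debug lock nests inside
 *
 *   unclaimed_reg_debug():
 *       the caller already holds uncore->lock with interrupts disabled;
 *       debug->lock is taken on the "before" call and released on the
 *       "after" call, so it is held across the register access itself.
 *
 *   intel_uncore_unclaimed_mmio() /
 *   intel_uncore_arm_unclaimed_mmio_detection():
 *       spin_lock_irq(&uncore->debug->lock);   taken on its own
 */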


@@ -36,6 +36,13 @@ struct drm_i915_private;
 struct intel_runtime_pm;
 struct intel_uncore;
 
+struct intel_uncore_mmio_debug {
+	spinlock_t lock; /** lock is also taken in irq contexts. */
+	int unclaimed_mmio_check;
+	int saved_mmio_check;
+	u32 suspend_count;
+};
+
 enum forcewake_domain_id {
 	FW_DOMAIN_ID_RENDER = 0,
 	FW_DOMAIN_ID_BLITTER,
@@ -137,14 +144,9 @@ struct intel_uncore {
 		u32 __iomem *reg_ack;
 	} *fw_domain[FW_DOMAIN_ID_COUNT];
 
-	struct {
-		unsigned int count;
-
-		int saved_mmio_check;
-		int saved_mmio_debug;
-	} user_forcewake;
+	unsigned int user_forcewake_count;
 
-	int unclaimed_mmio_check;
+	struct intel_uncore_mmio_debug *debug;
 };
 
 /* Iterate over initialised fw domains */
@@ -179,6 +181,8 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
 	return uncore->flags & UNCORE_HAS_FIFO;
 }
 
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
 void intel_uncore_init_early(struct intel_uncore *uncore,
 			     struct drm_i915_private *i915);
 int intel_uncore_init_mmio(struct intel_uncore *uncore);