drm/i915: update rpm_get/put to use the rpm structure
The functions were internally already using only the rpm structure, so we just need to flip the interface.

v2: rebase

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-7-daniele.ceraolospurio@intel.com
commit d858d5695f
parent 69c6635544
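For context, a minimal sketch of the caller-side change this patch makes throughout the driver (example_user() is a made-up caller; the types and calls match the diff below):

    /* A hypothetical caller, before and after this patch. */
    static void example_user(struct drm_i915_private *i915)
    {
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        intel_wakeref_t wakeref;

        /* before: wakeref = intel_runtime_pm_get(i915); */
        wakeref = intel_runtime_pm_get(rpm);

        /* ... access the hardware while the wakeref is held ... */

        /* before: intel_runtime_pm_put(i915, wakeref); */
        intel_runtime_pm_put(rpm, wakeref);
    }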
@@ -222,6 +222,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
 struct drm_device *dev = obj->base.dev;
 struct drm_i915_private *i915 = to_i915(dev);
+struct intel_runtime_pm *rpm = &i915->runtime_pm;
 struct i915_ggtt *ggtt = &i915->ggtt;
 bool write = area->vm_flags & VM_WRITE;
 intel_wakeref_t wakeref;
@@ -243,7 +244,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 if (ret)
 goto err;

-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(rpm);

 srcu = i915_reset_trylock(i915);
 if (srcu < 0) {
@@ -308,7 +309,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 goto err_fence;

 /* Mark as being mmapped into userspace for later revocation */
-assert_rpm_wakelock_held(&i915->runtime_pm);
+assert_rpm_wakelock_held(rpm);
 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
 list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
 if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
@@ -327,7 +328,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 err_reset:
 i915_reset_unlock(i915, srcu);
 err_rpm:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(rpm, wakeref);
 i915_gem_object_unpin_pages(obj);
 err:
 switch (ret) {
@@ -410,7 +411,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 * wakeref.
 */
 lockdep_assert_held(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 if (!obj->userfault_count)
 goto out;
@@ -427,7 +428,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 wmb();

 out:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }

 static int create_mmap_offset(struct drm_i915_gem_object *obj)

@@ -181,7 +181,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 struct drm_i915_gem_object *obj, *on;
 intel_wakeref_t wakeref;

-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 llist_for_each_entry_safe(obj, on, freed, freed) {
 struct i915_vma *vma, *vn;

@@ -243,7 +243,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,

 cond_resched();
 }
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }

 void i915_gem_flush_free_objects(struct drm_i915_private *i915)

@@ -182,7 +182,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 * we will force the wake during oom-notifier.
 */
 if (shrink & I915_SHRINK_BOUND) {
-wakeref = intel_runtime_pm_get_if_in_use(i915);
+wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
 if (!wakeref)
 shrink &= ~I915_SHRINK_BOUND;
 }
@@ -267,7 +267,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 }

 if (shrink & I915_SHRINK_BOUND)
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);

 i915_retire_requests(i915);

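Note the conditional variant used in the shrinker above: intel_runtime_pm_get_if_in_use() only returns a wakeref if the device is already awake. A sketch of that pattern under the new interface (example_opportunistic() is illustrative):

    static void example_opportunistic(struct drm_i915_private *i915)
    {
        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
        if (!wakeref)
            return; /* device is asleep: skip the hardware work */

        /* ... hardware access ... */

        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
    }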
@@ -1754,7 +1754,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
 return PTR_ERR(file);

 mutex_lock(&dev_priv->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 ctx = live_context(dev_priv, file);
 if (IS_ERR(ctx)) {
@@ -1768,7 +1768,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
 err = i915_subtests(tests, ctx);

 out_unlock:
-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 mutex_unlock(&dev_priv->drm.struct_mutex);

 mock_file_free(dev_priv, file);

@@ -293,7 +293,7 @@ static int igt_gem_coherency(void *arg)
 values = offsets + ncachelines;

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 for (over = igt_coherency_mode; over->name; over++) {
 if (!over->set)
 continue;
@@ -371,7 +371,7 @@ static int igt_gem_coherency(void *arg)
 }
 }
 unlock:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 kfree(offsets);
 return err;

@@ -53,7 +53,7 @@ static int live_nop_switch(void *arg)
 return PTR_ERR(file);

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
 if (!ctx) {
@@ -156,7 +156,7 @@ static int live_nop_switch(void *arg)
 }

 out_unlock:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 mock_file_free(i915, file);
 return err;
@@ -1084,7 +1084,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 goto out_unlock;
 }

-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 ce = i915_gem_context_get_engine(ctx, RCS0);
 if (IS_ERR(ce)) {
@@ -1124,7 +1124,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 out_context:
 intel_context_put(ce);
 out_rpm:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 i915_gem_object_put(obj);

 out_unlock:
@@ -1541,7 +1541,7 @@ static int igt_vm_isolation(void *arg)
 GEM_BUG_ON(ctx_b->vm->total != vm_total);
 vm_total -= I915_GTT_PAGE_SIZE;

-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 count = 0;
 for_each_engine(engine, i915, id) {
@@ -1586,7 +1586,7 @@ static int igt_vm_isolation(void *arg)
 count, RUNTIME_INFO(i915)->num_engines);

 out_rpm:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 out_unlock:
 if (igt_live_test_end(&t))
 err = -EIO;

@@ -205,7 +205,7 @@ static int igt_partial_tiling(void *arg)
 }

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 if (1) {
 IGT_TIMEOUT(end);
@@ -316,7 +316,7 @@ next_tiling: ;
 }

 out_unlock:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 i915_gem_object_unpin_pages(obj);
 out:

@@ -1103,7 +1103,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 return true;

 /* If the whole device is asleep, the engine must be idle */
-wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
 if (!wakeref)
 return true;

@@ -1117,7 +1117,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
 idle = false;

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return idle;
 }
@@ -1531,10 +1531,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,

 rcu_read_unlock();

-wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
+wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
 if (wakeref) {
 intel_engine_print_registers(engine, m);
-intel_runtime_pm_put(engine->i915, wakeref);
+intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
 } else {
 drm_printf(m, "\tDevice is asleep; skipping register dump\n");
 }

@@ -273,7 +273,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 if (i915_terminally_wedged(dev_priv))
 return;

-wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
 if (!wakeref)
 return;

@@ -324,7 +324,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 if (hung)
 hangcheck_declare_hang(dev_priv, hung, stuck);

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 /* Reset timer in case GPU hangs without another request being added */
 i915_queue_hangcheck(dev_priv);

@@ -1311,7 +1311,7 @@ void i915_handle_error(struct drm_i915_private *i915,
 * isn't the case at least when we get here by doing a
 * simulated reset via debugfs, so get an RPM reference.
 */
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 engine_mask &= INTEL_INFO(i915)->engine_mask;

@@ -1374,7 +1374,7 @@ void i915_handle_error(struct drm_i915_private *i915,
 wake_up_all(&error->reset_queue);

 out:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }

 int i915_reset_trylock(struct drm_i915_private *i915)

@@ -394,7 +394,7 @@ static int igt_reset_nop(void *arg)
 }

 i915_gem_context_clear_bannable(ctx);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 reset_count = i915_reset_count(&i915->gpu_error);
 count = 0;
 do {
@@ -441,7 +441,7 @@ static int igt_reset_nop(void *arg)
 err = igt_flush_test(i915, I915_WAIT_LOCKED);
 mutex_unlock(&i915->drm.struct_mutex);

-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);

 out:
 mock_file_free(i915, file);
@@ -478,7 +478,7 @@ static int igt_reset_nop_engine(void *arg)
 }

 i915_gem_context_clear_bannable(ctx);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 for_each_engine(engine, i915, id) {
 unsigned int reset_count, reset_engine_count;
 unsigned int count;
@@ -549,7 +549,7 @@ static int igt_reset_nop_engine(void *arg)
 err = igt_flush_test(i915, I915_WAIT_LOCKED);
 mutex_unlock(&i915->drm.struct_mutex);

-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 out:
 mock_file_free(i915, file);
 if (i915_reset_failed(i915))
@@ -1749,7 +1749,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 if (i915_terminally_wedged(i915))
 return -EIO; /* we're long past hope of a successful reset */

-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
 drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */

@@ -1760,7 +1760,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 mutex_unlock(&i915->drm.struct_mutex);

 i915_modparams.enable_hangcheck = saved_hangcheck;
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);

 return err;
 }

@@ -33,7 +33,7 @@ static int live_sanitycheck(void *arg)
 return 0;

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 if (igt_spinner_init(&spin, i915))
 goto err_unlock;
@@ -74,7 +74,7 @@ static int live_sanitycheck(void *arg)
 igt_spinner_fini(&spin);
 err_unlock:
 igt_flush_test(i915, I915_WAIT_LOCKED);
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 return err;
 }
@@ -97,7 +97,7 @@ static int live_busywait_preempt(void *arg)
 */

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 ctx_hi = kernel_context(i915);
 if (!ctx_hi)
@@ -255,7 +255,7 @@ static int live_busywait_preempt(void *arg)
 err_unlock:
 if (igt_flush_test(i915, I915_WAIT_LOCKED))
 err = -EIO;
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 return err;
 }
@@ -277,7 +277,7 @@ static int live_preempt(void *arg)
 pr_err("Logical preemption supported, but not exposed\n");

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 if (igt_spinner_init(&spin_hi, i915))
 goto err_unlock;
@@ -362,7 +362,7 @@ static int live_preempt(void *arg)
 igt_spinner_fini(&spin_hi);
 err_unlock:
 igt_flush_test(i915, I915_WAIT_LOCKED);
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 return err;
 }
@@ -382,7 +382,7 @@ static int live_late_preempt(void *arg)
 return 0;

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 if (igt_spinner_init(&spin_hi, i915))
 goto err_unlock;
@@ -466,7 +466,7 @@ static int live_late_preempt(void *arg)
 igt_spinner_fini(&spin_hi);
 err_unlock:
 igt_flush_test(i915, I915_WAIT_LOCKED);
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 return err;

@@ -532,7 +532,7 @@ static int live_suppress_self_preempt(void *arg)
 return 0; /* presume black blox */

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 if (preempt_client_init(i915, &a))
 goto err_unlock;
@@ -606,7 +606,7 @@ static int live_suppress_self_preempt(void *arg)
 err_unlock:
 if (igt_flush_test(i915, I915_WAIT_LOCKED))
 err = -EIO;
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 return err;

@@ -683,7 +683,7 @@ static int live_suppress_wait_preempt(void *arg)
 return 0;

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
 goto err_unlock;
@@ -776,7 +776,7 @@ static int live_suppress_wait_preempt(void *arg)
 err_unlock:
 if (igt_flush_test(i915, I915_WAIT_LOCKED))
 err = -EIO;
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 return err;

@@ -807,7 +807,7 @@ static int live_chain_preempt(void *arg)
 return 0;

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 if (preempt_client_init(i915, &hi))
 goto err_unlock;
@@ -924,7 +924,7 @@ static int live_chain_preempt(void *arg)
 err_unlock:
 if (igt_flush_test(i915, I915_WAIT_LOCKED))
 err = -EIO;
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 return err;

@@ -953,7 +953,7 @@ static int live_preempt_hang(void *arg)
 return 0;

 mutex_lock(&i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 if (igt_spinner_init(&spin_hi, i915))
 goto err_unlock;
@@ -1050,7 +1050,7 @@ static int live_preempt_hang(void *arg)
 igt_spinner_fini(&spin_hi);
 err_unlock:
 igt_flush_test(i915, I915_WAIT_LOCKED);
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);
 return err;
 }
@@ -1256,7 +1256,7 @@ static int live_preempt_smoke(void *arg)
 return -ENOMEM;

 mutex_lock(&smoke.i915->drm.struct_mutex);
-wakeref = intel_runtime_pm_get(smoke.i915);
+wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);

 smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
 if (IS_ERR(smoke.batch)) {
@@ -1309,7 +1309,7 @@ static int live_preempt_smoke(void *arg)
 err_batch:
 i915_gem_object_put(smoke.batch);
 err_unlock:
-intel_runtime_pm_put(smoke.i915, wakeref);
+intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
 mutex_unlock(&smoke.i915->drm.struct_mutex);
 kfree(smoke.contexts);

@@ -42,14 +42,14 @@ static int igt_wedged_reset(void *arg)
 /* Check that we can recover a wedged device with a GPU reset */

 igt_global_reset_lock(i915);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 i915_gem_set_wedged(i915);

 GEM_BUG_ON(!i915_reset_failed(i915));
 i915_reset(i915, ALL_ENGINES, NULL);

-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 igt_global_reset_unlock(i915);

 return i915_reset_failed(i915) ? -EIO : 0;

@@ -636,7 +636,7 @@ static int live_dirty_whitelist(void *arg)
 if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
 return 0;

-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 mutex_unlock(&i915->drm.struct_mutex);
 file = mock_file(i915);
@@ -666,7 +666,7 @@ static int live_dirty_whitelist(void *arg)
 mock_file_free(i915, file);
 mutex_lock(&i915->drm.struct_mutex);
 out_rpm:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 return err;
 }

@@ -1055,7 +1055,7 @@ live_gpu_reset_workarounds(void *arg)
 pr_info("Verifying after GPU reset...\n");

 igt_global_reset_lock(i915);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 reference_lists_init(i915, &lists);

@@ -1070,7 +1070,7 @@ live_gpu_reset_workarounds(void *arg)
 out:
 kernel_context_close(ctx);
 reference_lists_fini(i915, &lists);
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 igt_global_reset_unlock(i915);

 return ok ? 0 : -ESRCH;
@@ -1097,7 +1097,7 @@ live_engine_reset_workarounds(void *arg)
 return PTR_ERR(ctx);

 igt_global_reset_lock(i915);
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 reference_lists_init(i915, &lists);

@@ -1154,7 +1154,7 @@ live_engine_reset_workarounds(void *arg)

 err:
 reference_lists_fini(i915, &lists);
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 igt_global_reset_unlock(i915);
 kernel_context_close(ctx);

@@ -170,7 +170,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
 if (WARN_ON(!vgpu_fence_sz(vgpu)))
 return;

-intel_runtime_pm_get(dev_priv);
+intel_runtime_pm_get(&dev_priv->runtime_pm);

 mutex_lock(&dev_priv->drm.struct_mutex);
 _clear_vgpu_fence(vgpu);
@@ -181,17 +181,18 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
 }
 mutex_unlock(&dev_priv->drm.struct_mutex);

-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 }

 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 {
 struct intel_gvt *gvt = vgpu->gvt;
 struct drm_i915_private *dev_priv = gvt->dev_priv;
+struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
 struct i915_fence_reg *reg;
 int i;

-intel_runtime_pm_get(dev_priv);
+intel_runtime_pm_get(rpm);

 /* Request fences from host */
 mutex_lock(&dev_priv->drm.struct_mutex);
@@ -207,7 +208,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 _clear_vgpu_fence(vgpu);

 mutex_unlock(&dev_priv->drm.struct_mutex);
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put_unchecked(rpm);
 return 0;
 out_free_fence:
 gvt_vgpu_err("Failed to alloc fences\n");
@@ -220,7 +221,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 vgpu->fence.regs[i] = NULL;
 }
 mutex_unlock(&dev_priv->drm.struct_mutex);
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put_unchecked(rpm);
 return -ENOSPC;
 }

@@ -316,9 +317,9 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 {
 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

-intel_runtime_pm_get(dev_priv);
+intel_runtime_pm_get(&dev_priv->runtime_pm);
 _clear_vgpu_fence(vgpu);
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 }

 /**

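The GVT hunks above pair intel_runtime_pm_get() with the unchecked release, which takes no wakeref cookie and so is not tracked by the debug checker. A sketch of that pattern (example_unchecked() is illustrative):

    static void example_unchecked(struct drm_i915_private *dev_priv)
    {
        /* no cookie is kept, so acquire/release cannot be cross-checked */
        intel_runtime_pm_get(&dev_priv->runtime_pm);
        /* ... hardware access ... */
        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
    }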
@@ -584,12 +584,12 @@ enum {

 static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
 {
-intel_runtime_pm_get(dev_priv);
+intel_runtime_pm_get(&dev_priv->runtime_pm);
 }

 static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
 {
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 }

 /**

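mmio_hw_access_pre()/mmio_hw_access_post() above illustrate the simplest way to adapt dev_priv-based call sites: a thin wrapper that routes into the rpm structure. A generic sketch of the same idea (the wrapper name is made up):

    static inline intel_wakeref_t example_rpm_get(struct drm_i915_private *i915)
    {
        return intel_runtime_pm_get(&i915->runtime_pm);
    }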
@@ -465,7 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 scheduler->current_vgpu = NULL;
 }

-intel_runtime_pm_get(dev_priv);
+intel_runtime_pm_get(&dev_priv->runtime_pm);
 spin_lock_bh(&scheduler->mmio_context_lock);
 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
 if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 }
 }
 spin_unlock_bh(&scheduler->mmio_context_lock);
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 mutex_unlock(&vgpu->gvt->sched_lock);
 }

@@ -1501,11 +1501,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 * as there is only one pre-allocated buf-obj for shadow.
 */
 if (list_empty(workload_q_head(vgpu, ring_id))) {
-intel_runtime_pm_get(dev_priv);
+intel_runtime_pm_get(&dev_priv->runtime_pm);
 mutex_lock(&dev_priv->drm.struct_mutex);
 ret = intel_gvt_scan_and_shadow_workload(workload);
 mutex_unlock(&dev_priv->drm.struct_mutex);
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 }

 if (ret) {

@@ -490,7 +490,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 intel_wakeref_t wakeref;
 int i, pipe;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 if (IS_CHERRYVIEW(dev_priv)) {
 intel_wakeref_t pref;
@@ -696,7 +696,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 }
 }

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return 0;
 }
@@ -833,7 +833,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 intel_wakeref_t wakeref;
 int ret = 0;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 if (IS_GEN(dev_priv, 5)) {
 u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
@@ -1045,7 +1045,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 return ret;
 }

@@ -1391,7 +1391,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 if (!HAS_FBC(dev_priv))
 return -ENODEV;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 mutex_lock(&fbc->lock);

 if (intel_fbc_is_active(dev_priv))
@@ -1418,7 +1418,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 }

 mutex_unlock(&fbc->lock);
-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return 0;
 }
@@ -1468,7 +1468,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 if (!HAS_IPS(dev_priv))
 return -ENODEV;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 seq_printf(m, "Enabled by kernel parameter: %s\n",
 yesno(i915_modparams.enable_ips));
@@ -1482,7 +1482,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 seq_puts(m, "Currently: disabled\n");
 }

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return 0;
 }
@@ -1561,7 +1561,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)

 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
 ia_freq = gpu_freq;
 sandybridge_pcode_read(dev_priv,
@@ -1575,7 +1575,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 ((ia_freq >> 0) & 0xff) * 100,
 ((ia_freq >> 8) & 0xff) * 100);
 }
-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return 0;
 }
@@ -1752,7 +1752,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 struct intel_uncore *uncore = &dev_priv->uncore;
 intel_wakeref_t wakeref;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@@ -1790,7 +1790,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
 seq_puts(m, "L-shaped memory detected\n");

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return 0;
 }
@@ -2303,7 +2303,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 if (!psr->sink_support)
 return 0;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 mutex_lock(&psr->lock);

 if (psr->enabled)
@@ -2367,7 +2367,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)

 unlock:
 mutex_unlock(&psr->lock);
-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return 0;
 }
@@ -2384,11 +2384,11 @@ i915_edp_psr_debug_set(void *data, u64 val)

 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 ret = intel_psr_debug_set(dev_priv, val);

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return ret;
 }
@@ -2504,7 +2504,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)

 csr = &dev_priv->csr;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
 seq_printf(m, "path: %s\n", csr->fw_path);
@@ -2530,7 +2530,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return 0;
 }
@@ -2814,7 +2814,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
 struct drm_connector_list_iter conn_iter;
 intel_wakeref_t wakeref;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 seq_printf(m, "CRTC info\n");
 seq_printf(m, "---------\n");
@@ -2863,7 +2863,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
 drm_connector_list_iter_end(&conn_iter);
 mutex_unlock(&dev->mode_config.mutex);

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return 0;
 }
@@ -2876,7 +2876,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 enum intel_engine_id id;
 struct drm_printer p;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 seq_printf(m, "GT awake? %s [%d]\n",
 yesno(dev_priv->gt.awake),
@@ -2888,7 +2888,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 for_each_engine(engine, dev_priv, id)
 intel_engine_dump(engine, &p, "%s\n", engine->name);

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return 0;
 }
@@ -4051,7 +4051,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 if (INTEL_GEN(i915) < 6)
 return 0;

-file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
+file->private_data =
+(void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
 intel_uncore_forcewake_user_get(&i915->uncore);

 return 0;
@@ -4065,7 +4066,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
 return 0;

 intel_uncore_forcewake_user_put(&i915->uncore);
-intel_runtime_pm_put(i915,
+intel_runtime_pm_put(&i915->runtime_pm,
 (intel_wakeref_t)(uintptr_t)file->private_data);

 return 0;

@@ -374,7 +374,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 if (ret)
 return ret;

-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 PIN_MAPPABLE |
 PIN_NONFAULT |
@@ -461,7 +461,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 i915_vma_unpin(vma);
 }
 out_unlock:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 mutex_unlock(&i915->drm.struct_mutex);

 return ret;
@@ -561,6 +561,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 {
 struct drm_i915_private *i915 = to_i915(obj->base.dev);
 struct i915_ggtt *ggtt = &i915->ggtt;
+struct intel_runtime_pm *rpm = &i915->runtime_pm;
 intel_wakeref_t wakeref;
 struct drm_mm_node node;
 struct dma_fence *fence;
@@ -581,14 +582,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 * This easily dwarfs any performance advantage from
 * using the cache bypass of indirect GGTT access.
 */
-wakeref = intel_runtime_pm_get_if_in_use(i915);
+wakeref = intel_runtime_pm_get_if_in_use(rpm);
 if (!wakeref) {
 ret = -EFAULT;
 goto out_unlock;
 }
 } else {
 /* No backing pages, no fallback, we must force GGTT access */
-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(rpm);
 }

 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -684,7 +685,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 i915_vma_unpin(vma);
 }
 out_rpm:
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(rpm, wakeref);
 out_unlock:
 mutex_unlock(&i915->drm.struct_mutex);
 return ret;
@@ -1174,7 +1175,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)

 GEM_TRACE("\n");

-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

 /*
@@ -1197,7 +1198,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 intel_gt_sanitize(i915, false);

 intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);

 mutex_lock(&i915->drm.struct_mutex);
 i915_gem_contexts_lost(i915);
@@ -1815,7 +1816,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
 * the objects as well, see i915_gem_freeze()
 */

-wakeref = intel_runtime_pm_get(i915);
+wakeref = intel_runtime_pm_get(&i915->runtime_pm);

 i915_gem_shrink(i915, -1UL, NULL, ~0);
 i915_gem_drain_freed_objects(i915);
@@ -1826,7 +1827,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
 i915_gem_object_unlock(obj);
 }

-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);

 return 0;
 }

@@ -270,7 +270,7 @@ static int fence_update(struct i915_fence_reg *fence,
 * be cleared before we can use any other fences to ensure that
 * the new fences do not overlap the elided clears, confusing HW.
 */
-wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
+wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
 if (!wakeref) {
 GEM_BUG_ON(vma);
 return 0;
@@ -284,7 +284,7 @@ static int fence_update(struct i915_fence_reg *fence,
 list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
 }

-intel_runtime_pm_put(fence->i915, wakeref);
+intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
 return 0;
 }

@@ -1825,7 +1825,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 unsigned int pde;
 bool flush = false;

-wakeref = intel_runtime_pm_get(vm->i915);
+wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

 spin_lock(&ppgtt->base.pd.lock);
 gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
@@ -1868,12 +1868,12 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 gen6_ggtt_invalidate(vm->i915);
 }

-intel_runtime_pm_put(vm->i915, wakeref);
+intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);

 return 0;

 unwind_out:
-intel_runtime_pm_put(vm->i915, wakeref);
+intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
 gen6_ppgtt_clear_range(vm, from, start - from);
 return -ENOMEM;
 }

@@ -1375,7 +1375,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
 free_oa_buffer(dev_priv);

 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-intel_runtime_pm_put(dev_priv, stream->wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);

 if (stream->ctx)
 oa_put_render_ctx_id(stream);
@@ -2112,7 +2112,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 * In our case we are expecting that taking pm + FORCEWAKE
 * references will effectively disable RC6.
 */
-stream->wakeref = intel_runtime_pm_get(dev_priv);
+stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

 ret = alloc_oa_buffer(dev_priv);
@@ -2148,7 +2148,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 put_oa_config(dev_priv, stream->oa_config);

 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-intel_runtime_pm_put(dev_priv, stream->wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);

 err_config:
 if (stream->ctx)

@@ -171,7 +171,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)

 wakeref = 0;
 if (READ_ONCE(dev_priv->gt.awake))
-wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
 if (!wakeref)
 return;

@@ -207,7 +207,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 }
 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 }

 static void
@@ -443,14 +443,15 @@ static u64 __get_rc6(struct drm_i915_private *i915)
 static u64 get_rc6(struct drm_i915_private *i915)
 {
 #if IS_ENABLED(CONFIG_PM)
+struct intel_runtime_pm *rpm = &i915->runtime_pm;
 intel_wakeref_t wakeref;
 unsigned long flags;
 u64 val;

-wakeref = intel_runtime_pm_get_if_in_use(i915);
+wakeref = intel_runtime_pm_get_if_in_use(rpm);
 if (wakeref) {
 val = __get_rc6(i915);
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(rpm, wakeref);

 /*
 * If we are coming back from being runtime suspended we must
@@ -469,8 +470,7 @@ static u64 get_rc6(struct drm_i915_private *i915)

 spin_unlock_irqrestore(&i915->pmu.lock, flags);
 } else {
-struct pci_dev *pdev = i915->drm.pdev;
-struct device *kdev = &pdev->dev;
+struct device *kdev = rpm->kdev;

 /*
 * We are runtime suspended.

@@ -264,7 +264,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 intel_wakeref_t wakeref;
 u32 freq;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 vlv_punit_get(dev_priv);
@@ -276,7 +276,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
 }

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
 }
@@ -364,7 +364,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 if (ret)
 return ret;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 mutex_lock(&rps->lock);

 val = intel_freq_opcode(dev_priv, val);
@@ -392,7 +392,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,

 unlock:
 mutex_unlock(&rps->lock);
-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return ret ?: count;
 }
@@ -420,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 if (ret)
 return ret;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 mutex_lock(&rps->lock);

 val = intel_freq_opcode(dev_priv, val);
@@ -444,7 +444,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,

 unlock:
 mutex_unlock(&rps->lock);
-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

 return ret ?: count;
 }

@@ -2112,7 +2112,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 * intel_runtime_pm_put(), so it is correct to wrap only the
 * pin/unpin/fence and not more.
 */
-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 i915_gem_object_lock(obj);

 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
@@ -2169,7 +2169,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

 i915_gem_object_unlock(obj);
-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 return vma;
 }

@@ -13927,7 +13927,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
 }
-intel_runtime_pm_put(dev_priv, intel_state->wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);

 /*
 * Defer the cleanup of the old state to a separate worker to not
@@ -14006,7 +14006,7 @@ static int intel_atomic_commit(struct drm_device *dev,
 struct drm_i915_private *dev_priv = to_i915(dev);
 int ret = 0;

-intel_state->wakeref = intel_runtime_pm_get(dev_priv);
+intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 drm_atomic_state_get(state);
 i915_sw_fence_init(&intel_state->commit_ready,
@@ -14044,7 +14044,7 @@ static int intel_atomic_commit(struct drm_device *dev,
 if (ret) {
 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
 i915_sw_fence_commit(&intel_state->commit_ready);
-intel_runtime_pm_put(dev_priv, intel_state->wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
 return ret;
 }

@@ -14056,7 +14056,7 @@ static int intel_atomic_commit(struct drm_device *dev,
 i915_sw_fence_commit(&intel_state->commit_ready);

 drm_atomic_helper_cleanup_planes(dev, state);
-intel_runtime_pm_put(dev_priv, intel_state->wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
 return ret;
 }
 dev_priv->wm.distrust_bios_wm = false;

@@ -1644,7 +1644,7 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
 goto out_verify;

 cancel_delayed_work(&power_domains->async_put_work);
-intel_runtime_pm_put_raw(dev_priv,
+intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
 fetch_and_zero(&power_domains->async_put_wakeref));
 out_verify:
 verify_async_put_domains_state(power_domains);
@@ -1684,7 +1684,7 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
 enum intel_display_power_domain domain)
 {
 struct i915_power_domains *power_domains = &dev_priv->power_domains;
-intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
+intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 mutex_lock(&power_domains->lock);
 __intel_display_power_get_domain(dev_priv, domain);
@@ -1713,7 +1713,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
 intel_wakeref_t wakeref;
 bool is_enabled;

-wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
 if (!wakeref)
 return false;

@@ -1729,7 +1729,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
 mutex_unlock(&power_domains->lock);

 if (!is_enabled) {
-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 wakeref = 0;
 }

@@ -1786,7 +1786,7 @@ void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
 enum intel_display_power_domain domain)
 {
 __intel_display_power_put(dev_priv, domain);
-intel_runtime_pm_put_unchecked(dev_priv);
+intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 }

 static void
@@ -1806,6 +1806,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
 struct drm_i915_private *dev_priv =
 container_of(power_domains, struct drm_i915_private,
 power_domains);
+struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
 enum intel_display_power_domain domain;
 intel_wakeref_t wakeref;

@@ -1814,8 +1815,8 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
 * wakeref to make the state checker happy about the HW access during
 * power well disabling.
 */
-assert_rpm_raw_wakeref_held(&dev_priv->runtime_pm);
-wakeref = intel_runtime_pm_get(dev_priv);
+assert_rpm_raw_wakeref_held(rpm);
+wakeref = intel_runtime_pm_get(rpm);

 for_each_power_domain(domain, mask) {
 /* Clear before put, so put's sanity check is happy. */
@@ -1823,7 +1824,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
 __intel_display_power_put_domain(dev_priv, domain);
 }

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(rpm, wakeref);
 }

 static void
@@ -1833,7 +1834,8 @@ intel_display_power_put_async_work(struct work_struct *work)
 container_of(work, struct drm_i915_private,
 power_domains.async_put_work.work);
 struct i915_power_domains *power_domains = &dev_priv->power_domains;
-intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv);
+struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
 intel_wakeref_t old_work_wakeref = 0;

 mutex_lock(&power_domains->lock);
@@ -1863,9 +1865,9 @@ intel_display_power_put_async_work(struct work_struct *work)
 mutex_unlock(&power_domains->lock);

 if (old_work_wakeref)
-intel_runtime_pm_put_raw(dev_priv, old_work_wakeref);
+intel_runtime_pm_put_raw(rpm, old_work_wakeref);
 if (new_work_wakeref)
-intel_runtime_pm_put_raw(dev_priv, new_work_wakeref);
+intel_runtime_pm_put_raw(rpm, new_work_wakeref);
 }

 /**
@@ -1883,7 +1885,8 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
 intel_wakeref_t wakeref)
 {
 struct i915_power_domains *power_domains = &i915->power_domains;
-intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915);
+struct intel_runtime_pm *rpm = &i915->runtime_pm;
+intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

 mutex_lock(&power_domains->lock);

@@ -1910,9 +1913,9 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
 mutex_unlock(&power_domains->lock);

 if (work_wakeref)
-intel_runtime_pm_put_raw(i915, work_wakeref);
+intel_runtime_pm_put_raw(rpm, work_wakeref);

-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(rpm, wakeref);
 }

 /**
@@ -1948,7 +1951,7 @@ void intel_display_power_flush_work(struct drm_i915_private *i915)
 mutex_unlock(&power_domains->lock);

 if (work_wakeref)
-intel_runtime_pm_put_raw(i915, work_wakeref);
+intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
 }

 /**
@@ -1987,7 +1990,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 intel_wakeref_t wakeref)
 {
 __intel_display_power_put(dev_priv, domain);
-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 }
 #endif

@@ -4402,7 +4405,7 @@ void intel_power_domains_fini_hw(struct drm_i915_private *i915)
 intel_power_domains_verify_state(i915);

 /* Keep the power well enabled, but cancel its rpm wakeref. */
-intel_runtime_pm_put(i915, wakeref);
+intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }

 /**

@@ -213,7 +213,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 }

 mutex_lock(&dev->struct_mutex);
-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 /* Pin the GGTT vma for our access via info->screen_base.
 * This also validates that any existing fb inherited from the
@@ -272,7 +272,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 ifbdev->vma = vma;
 ifbdev->vma_flags = flags;

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 mutex_unlock(&dev->struct_mutex);
 vga_switcheroo_client_fb_set(pdev, info);
 return 0;
@@ -280,7 +280,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_unpin:
 intel_unpin_fb_vma(vma, flags);
 out_unlock:
-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 mutex_unlock(&dev->struct_mutex);
 return ret;
 }

@@ -230,7 +230,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 intel_wakeref_t wakeref;
 enum hpd_pin pin;

-wakeref = intel_runtime_pm_get(dev_priv);
+wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

 spin_lock_irq(&dev_priv->irq_lock);
 for_each_hpd_pin(pin) {
@@ -263,7 +263,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 dev_priv->display.hpd_irq_setup(dev_priv);
 spin_unlock_irq(&dev_priv->irq_lock);

-intel_runtime_pm_put(dev_priv, wakeref);
+intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 }

 bool intel_encoder_hotplug(struct intel_encoder *encoder,

@ -369,7 +369,7 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
|
||||
|
||||
/**
|
||||
* intel_runtime_pm_get_raw - grab a raw runtime pm reference
|
||||
* @i915: i915 device instance
|
||||
* @rpm: the intel_runtime_pm structure
|
||||
*
|
||||
* This is the unlocked version of intel_display_power_is_enabled() and should
|
||||
* only be used from error capture and recovery code where deadlocks are
|
||||
@ -384,15 +384,14 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
|
||||
* Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
|
||||
* as True if the wakeref was acquired, or False otherwise.
|
||||
*/
|
||||
|
||||
intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
|
||||
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
|
||||
{
|
||||
return __intel_runtime_pm_get(&i915->runtime_pm, false);
|
||||
return __intel_runtime_pm_get(rpm, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_runtime_pm_get - grab a runtime pm reference
|
||||
* @i915: i915 device instance
|
||||
* @rpm: the intel_runtime_pm structure
|
||||
*
|
||||
* This function grabs a device-level runtime pm reference (mostly used for GEM
|
||||
* code to ensure the GTT or GT is on) and ensures that it is powered up.
|
||||
@ -402,14 +401,14 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
|
||||
*
|
||||
* Returns: the wakeref cookie to pass to intel_runtime_pm_put()
|
||||
*/
|
||||
intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
|
||||
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
|
||||
{
|
||||
return __intel_runtime_pm_get(&i915->runtime_pm, true);
|
||||
return __intel_runtime_pm_get(rpm, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
|
||||
* @i915: i915 device instance
|
||||
* @rpm: the intel_runtime_pm structure
|
||||
*
|
||||
* This function grabs a device-level runtime pm reference if the device is
|
||||
* already in use and ensures that it is powered up. It is illegal to try
|
||||
@ -421,10 +420,8 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
|
||||
* Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
|
||||
* as True if the wakeref was acquired, or False otherwise.
|
||||
*/
|
||||
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
|
||||
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
|
||||
{
|
||||
struct intel_runtime_pm *rpm = &i915->runtime_pm;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PM)) {
|
||||
/*
|
||||
* In cases runtime PM is disabled by the RPM core and we get

@ -443,7 +440,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @i915: i915 device instance
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
@ -460,10 +457,8 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm *rpm = &i915->runtime_pm;

	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);
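
As the assert above implies, the _noresume variant only piggybacks on a wakeref the caller must already hold; a hedged sketch (hypothetical helper):

/* Hypothetical: take an extra wakeref for deferred work while the
 * caller is known to hold one already (asserted by the call). */
static intel_wakeref_t start_deferred_work(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t extra = intel_runtime_pm_get_noresume(rpm);

	/* ... hand 'extra' to an async path, which will put it ... */
	return extra;
}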

@ -488,7 +483,7 @@ static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @i915: i915 device instance
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
@ -496,14 +491,14 @@ static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(&i915->runtime_pm, wref, false);
	__intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @i915: i915 device instance
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
@ -513,24 +508,24 @@ intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	__intel_runtime_pm_put(&i915->runtime_pm, -1, true);
	__intel_runtime_pm_put(rpm, -1, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @i915: i915 device instance
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(&i915->runtime_pm, wref, true);
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif
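
Note the split here: with CONFIG_DRM_I915_DEBUG_RUNTIME_PM enabled the put is wakeref-checked; otherwise the header below maps it onto the unchecked variant. A sketch of the pairing discipline this enforces (hypothetical helper):

/* Hypothetical: every get must be balanced by a put with the same
 * cookie; with the debug config, mismatches are tracked and can be
 * dumped via print_intel_runtime_pm_wakeref(). */
static int powered_section(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t wakeref = intel_runtime_pm_get(rpm);
	int err = 0;

	/* ... work requiring the device to stay awake ... */

	intel_runtime_pm_put(rpm, wakeref);	/* pass the cookie, not -1 */
	return err;
}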

@ -174,30 +174,30 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm);

intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);

#define with_intel_runtime_pm(i915, wf) \
	for ((wf) = intel_runtime_pm_get(i915); (wf); \
	     intel_runtime_pm_put((i915), (wf)), (wf) = 0)
	for ((wf) = intel_runtime_pm_get(&(i915)->runtime_pm); (wf); \
	     intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_in_use(i915, wf) \
	for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
	     intel_runtime_pm_put((i915), (wf)), (wf) = 0)
	for ((wf) = intel_runtime_pm_get_if_in_use(&(i915)->runtime_pm); (wf); \
	     intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0)
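
The macros keep taking the i915 device pointer and now expand the &(i915)->runtime_pm dereference themselves, so existing users need no change. A usage sketch (hypothetical function):

/* Hypothetical: the for-loop macros scope the wakeref to the block
 * and release it (zeroing wf) when the block exits. */
static void sample_counters(struct drm_i915_private *i915)
{
	intel_wakeref_t wf;

	with_intel_runtime_pm(i915, wf) {
		/* device guaranteed awake in here */
	}

	with_intel_runtime_pm_if_in_use(i915, wf) {
		/* entered only if the device was already awake */
	}
}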

void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
static inline void
intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	intel_runtime_pm_put_unchecked(i915);
	intel_runtime_pm_put_unchecked(rpm);
}
#endif
void intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref);
void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,

@ -9,14 +9,14 @@

static void rpm_get(struct drm_i915_private *i915, struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(i915);
	wf->wakeref = intel_runtime_pm_get(&i915->runtime_pm);
}

static void rpm_put(struct drm_i915_private *i915, struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	GEM_BUG_ON(!wakeref);
}

@ -86,7 +86,7 @@ static void wakeref_auto_timeout(struct timer_list *t)
	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->i915, wakeref);
	intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
}
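
For context on the timer above: intel_wakeref_auto() (converted in the next hunk) arms this timeout so the wakeref drops itself if it is not refreshed. A hedged sketch of the intended use; the field name is assumed from the userfault code touched earlier in this diff, and the 250 ms value is purely illustrative:

/* Illustrative: keep the device awake for a grace period after a
 * fault; wakeref_auto_timeout() releases it unless re-armed. */
static void keep_awake_briefly(struct drm_i915_private *i915)
{
	intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
			   msecs_to_jiffies(250));
}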

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
@ -116,7 +116,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
	spin_lock_irqsave(&wf->lock, flags);
	if (!refcount_inc_not_zero(&wf->count)) {
		GEM_BUG_ON(wf->wakeref);
		wf->wakeref = intel_runtime_pm_get_if_in_use(wf->i915);
		wf->wakeref = intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm);
		refcount_set(&wf->count, 1);
	}
	spin_unlock_irqrestore(&wf->lock, flags);
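
The remaining hunks apply the same mechanical substitution across the selftests; schematically, each converted test keeps its wakeref dance unchanged and only redirects the first argument (names below are placeholders):

/* Schematic of the repeated selftest conversion that follows. */
static int example_selftest(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* ... body of the test ... */

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}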

@ -97,7 +97,7 @@ static int live_active_wait(void *arg)
	/* Check that we get a callback when requests retire upon waiting */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	err = __live_active_setup(i915, &active);

@ -111,7 +111,7 @@ static int live_active_wait(void *arg)
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

@ -126,7 +126,7 @@ static int live_active_retire(void *arg)
	/* Check that we get a callback when requests are indirectly retired */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	err = __live_active_setup(i915, &active);

@ -140,7 +140,7 @@ static int live_active_retire(void *arg)
	}

	i915_active_fini(&active.base);
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

@ -63,7 +63,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/*
	 * As a final sting in the tail, invalidate stolen. Under a real S4,
@ -74,7 +74,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
	 */
	trash_stolen(i915);

	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static int pm_prepare(struct drm_i915_private *i915)

@ -404,7 +404,7 @@ static int igt_evict_contexts(void *arg)
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Reserve a block so that we know we have enough to fit a few rq */
	memset(&hole, 0, sizeof(hole));
@ -515,7 +515,7 @@ static int igt_evict_contexts(void *arg)
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;

@ -295,9 +295,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			wakeref = intel_runtime_pm_get(i915);
			wakeref = intel_runtime_pm_get(&i915->runtime_pm);
			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
			intel_runtime_pm_put(i915, wakeref);
			intel_runtime_pm_put(&i915->runtime_pm, wakeref);
		}
		count = n;

@ -1171,7 +1171,7 @@ static int igt_ggtt_page(void *arg)
	if (err)
		goto out_unpin;

	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + n * PAGE_SIZE;
@ -1218,7 +1218,7 @@ static int igt_ggtt_page(void *arg)
	kfree(order);
out_remove:
	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);

@ -537,7 +537,7 @@ static int live_nop_request(void *arg)
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	for_each_engine(engine, i915, id) {
		struct i915_request *request = NULL;
@ -597,7 +597,7 @@ static int live_nop_request(void *arg)
	}

out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

@ -682,7 +682,7 @@ static int live_empty_request(void *arg)
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
@ -746,7 +746,7 @@ static int live_empty_request(void *arg)
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

@ -839,7 +839,7 @@ static int live_all_engines(void *arg)
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
@ -919,7 +919,7 @@ static int live_all_engines(void *arg)
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

@ -942,7 +942,7 @@ static int live_sequential_engines(void *arg)
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
@ -1048,7 +1048,7 @@ static int live_sequential_engines(void *arg)
		i915_request_put(request[id]);
	}
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

@ -1113,7 +1113,7 @@ static int live_breadcrumbs_smoketest(void *arg)
	 * On real hardware this time.
	 */

	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	file = mock_file(i915);
	if (IS_ERR(file)) {
@ -1220,7 +1220,7 @@ static int live_breadcrumbs_smoketest(void *arg)
out_file:
	mock_file_free(i915, file);
out_rpm:
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return ret;
}

@ -515,7 +515,7 @@ static int live_hwsp_engine(void *arg)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	for_each_engine(engine, i915, id) {
@ -558,7 +558,7 @@ static int live_hwsp_engine(void *arg)
		i915_timeline_put(tl);
	}

	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	kvfree(timelines);

@ -591,7 +591,7 @@ static int live_hwsp_alternate(void *arg)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	for (n = 0; n < NUM_TIMELINES; n++) {
@ -634,7 +634,7 @@ static int live_hwsp_alternate(void *arg)
		i915_timeline_put(tl);
	}

	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	kvfree(timelines);

@ -658,7 +658,7 @@ static int live_hwsp_wrap(void *arg)
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	tl = i915_timeline_create(i915, NULL);
	if (IS_ERR(tl)) {
@ -749,7 +749,7 @@ static int live_hwsp_wrap(void *arg)
out_free:
	i915_timeline_put(tl);
out_rpm:
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;

@ -771,7 +771,7 @@ static int live_hwsp_recycle(void *arg)
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	for_each_engine(engine, i915, id) {
@ -825,7 +825,7 @@ static int live_hwsp_recycle(void *arg)
out:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;

@ -873,7 +873,7 @@ static int igt_vma_remapped_gtt(void *arg)

	mutex_lock(&i915->drm.struct_mutex);

	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	for (t = types; *t; t++) {
		for (p = planes; p->width; p++) {
@ -965,7 +965,7 @@ static int igt_vma_remapped_gtt(void *arg)
	}

out:
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	i915_gem_object_put(obj);

@ -144,7 +144,7 @@ static int igt_guc_clients(void *args)

	GEM_BUG_ON(!HAS_GUC(dev_priv));
	mutex_lock(&dev_priv->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(dev_priv);
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	guc = &dev_priv->guc;
	if (!guc) {
@ -227,7 +227,7 @@ static int igt_guc_clients(void *args)
	guc_clients_create(guc);
	guc_clients_enable(guc);
unlock:
	intel_runtime_pm_put(dev_priv, wakeref);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}

@ -247,7 +247,7 @@ static int igt_guc_doorbells(void *arg)

	GEM_BUG_ON(!HAS_GUC(dev_priv));
	mutex_lock(&dev_priv->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(dev_priv);
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	guc = &dev_priv->guc;
	if (!guc) {
@ -340,7 +340,7 @@ static int igt_guc_doorbells(void *arg)
		guc_client_free(clients[i]);
	}
unlock:
	intel_runtime_pm_put(dev_priv, wakeref);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}

@ -176,7 +176,7 @@ static int live_forcewake_ops(void *arg)
		return 0;
	}

	wakeref = intel_runtime_pm_get(i915);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	for_each_fw_domain(domain, uncore, tmp) {
		smp_store_mb(domain->active, false);
@ -247,7 +247,7 @@ static int live_forcewake_ops(void *arg)
	}

out_rpm:
	intel_runtime_pm_put(i915, wakeref);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return err;
}