drm/i915: Drop struct_mutex from around i915_retire_requests()

We don't need to hold struct_mutex now for retiring requests, so drop it
from i915_retire_requests() and i915_gem_wait_for_idle(), finally
removing I915_WAIT_LOCKED for good.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-8-chris@chris-wilson.co.uk
commit 7e80576266
parent b723484069
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2019-10-04 14:40:02 +01:00

26 changed files with 213 additions and 460 deletions
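In short: request retirement and the idle-wait no longer nest under the global struct_mutex. A minimal before/after sketch of the calling convention (illustrative only, not itself part of the diff; the function names are the ones used throughout this patch):

	/* Before this patch: retiring required the global lock. */
	mutex_lock(&i915->drm.struct_mutex);
	i915_retire_requests(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	/* After: retirement is safe to call without struct_mutex... */
	i915_retire_requests(i915);

	/* ...and idling no longer passes I915_WAIT_LOCKED. */
	i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE,
			       MAX_SCHEDULE_TIMEOUT);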

drivers/gpu/drm/i915/gem/i915_gem_client_blt.c

@ -155,7 +155,6 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
struct drm_i915_private *i915 = w->ce->engine->i915;
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
@ -173,11 +172,9 @@ static void clear_pages_worker(struct work_struct *work)
obj->read_domains = I915_GEM_GPU_DOMAINS;
obj->write_domain = 0;
/* XXX: we need to kill this */
mutex_lock(&i915->drm.struct_mutex);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (unlikely(err))
goto out_unlock;
goto out_signal;
batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
if (IS_ERR(batch)) {
@ -229,8 +226,6 @@ static void clear_pages_worker(struct work_struct *work)
intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
out_signal:
if (unlikely(err)) {
dma_fence_set_error(&w->dma, err);

drivers/gpu/drm/i915/gem/i915_gem_context.c

@ -1159,8 +1159,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
}
static int
__intel_context_reconfigure_sseu(struct intel_context *ce,
struct intel_sseu sseu)
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
int ret;
@ -1183,23 +1182,6 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
return ret;
}
static int
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
struct drm_i915_private *i915 = ce->engine->i915;
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
ret = __intel_context_reconfigure_sseu(ce, sseu);
mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
static int
user_to_context_sseu(struct drm_i915_private *i915,
const struct drm_i915_gem_context_param_sseu *user,

drivers/gpu/drm/i915/gem/i915_gem_pm.c

@ -48,11 +48,7 @@ static void retire_work_handler(struct work_struct *work)
struct drm_i915_private *i915 =
container_of(work, typeof(*i915), gem.retire_work.work);
/* Come back later if the device is busy... */
if (mutex_trylock(&i915->drm.struct_mutex)) {
i915_retire_requests(i915);
mutex_unlock(&i915->drm.struct_mutex);
}
i915_retire_requests(i915);
queue_delayed_work(i915->wq,
&i915->gem.retire_work,
@ -86,26 +82,23 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
bool result = !intel_gt_is_wedged(gt);
do {
if (i915_gem_wait_for_idle(gt->i915,
I915_WAIT_LOCKED |
I915_WAIT_FOR_IDLE_BOOST,
I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* XXX hide warning from gem_eio */
if (i915_modparams.reset) {
dev_err(gt->i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
GEM_TRACE_DUMP();
}
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
intel_gt_set_wedged(gt);
result = false;
if (i915_gem_wait_for_idle(gt->i915,
I915_WAIT_FOR_IDLE_BOOST,
I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* XXX hide warning from gem_eio */
if (i915_modparams.reset) {
dev_err(gt->i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
GEM_TRACE_DUMP();
}
} while (i915_retire_requests(gt->i915) && result);
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
intel_gt_set_wedged(gt);
result = false;
}
if (intel_gt_pm_wait_for_idle(gt))
result = false;
@ -145,8 +138,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
user_forcewake(&i915->gt, true);
mutex_lock(&i915->drm.struct_mutex);
/*
* We have to flush all the executing contexts to main memory so
* that they can saved in the hibernation image. To ensure the last
@ -158,8 +149,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
*/
switch_to_kernel_context_sync(&i915->gt);
mutex_unlock(&i915->drm.struct_mutex);
cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_gem_drain_freed_objects(i915);

drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c

@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@ -78,7 +79,7 @@ static int gtt_set(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
u32 __iomem *map;
int err;
int err = 0;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
@ -90,15 +91,21 @@ static int gtt_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map))
return PTR_ERR(map);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto out_rpm;
}
iowrite32(v, &map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
return 0;
out_rpm:
intel_gt_pm_put(vma->vm->gt);
return err;
}
static int gtt_get(struct drm_i915_gem_object *obj,
@ -107,7 +114,7 @@ static int gtt_get(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
u32 __iomem *map;
int err;
int err = 0;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
@ -119,15 +126,21 @@ static int gtt_get(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map))
return PTR_ERR(map);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto out_rpm;
}
*v = ioread32(&map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
return 0;
out_rpm:
intel_gt_pm_put(vma->vm->gt);
return err;
}
static int wc_set(struct drm_i915_gem_object *obj,
@ -280,7 +293,6 @@ static int igt_gem_coherency(void *arg)
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
int err = 0;
@ -299,8 +311,6 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
@ -326,7 +336,7 @@ static int igt_gem_coherency(void *arg)
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto unlock;
goto free;
}
i915_random_reorder(offsets, ncachelines, &prng);
@ -377,15 +387,13 @@ static int igt_gem_coherency(void *arg)
}
}
}
unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
free:
kfree(offsets);
return err;
put_object:
i915_gem_object_put(obj);
goto unlock;
goto free;
}
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)

drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c

@ -164,7 +164,6 @@ struct parallel_switch {
static int __live_parallel_switch1(void *data)
{
struct parallel_switch *arg = data;
struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
IGT_TIMEOUT(end_time);
unsigned long count;
@ -176,16 +175,12 @@ static int __live_parallel_switch1(void *data)
for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
i915_request_put(rq);
mutex_lock(&i915->drm.struct_mutex);
rq = i915_request_create(arg->ce[n]);
if (IS_ERR(rq)) {
mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(rq))
return PTR_ERR(rq);
}
i915_request_get(rq);
i915_request_add(rq);
mutex_unlock(&i915->drm.struct_mutex);
}
err = 0;
@ -205,7 +200,6 @@ static int __live_parallel_switch1(void *data)
static int __live_parallel_switchN(void *data)
{
struct parallel_switch *arg = data;
struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
IGT_TIMEOUT(end_time);
unsigned long count;
int n;
@ -215,15 +209,11 @@ static int __live_parallel_switchN(void *data)
for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
struct i915_request *rq;
mutex_lock(&i915->drm.struct_mutex);
rq = i915_request_create(arg->ce[n]);
if (IS_ERR(rq)) {
mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(rq))
return PTR_ERR(rq);
}
i915_request_add(rq);
mutex_unlock(&i915->drm.struct_mutex);
}
count++;
@ -1173,7 +1163,7 @@ __sseu_test(const char *name,
if (ret)
return ret;
ret = __intel_context_reconfigure_sseu(ce, sseu);
ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
goto out_spin;
@ -1277,7 +1267,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_fail;
out_fail:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
ret = -EIO;
intel_context_unpin(ce);

drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c

@ -581,12 +581,8 @@ static void disable_retire_worker(struct drm_i915_private *i915)
static void restore_retire_worker(struct drm_i915_private *i915)
{
igt_flush_test(i915);
intel_gt_pm_put(&i915->gt);
mutex_lock(&i915->drm.struct_mutex);
igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_driver_register__shrinker(i915);
}

drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c

@ -65,9 +65,7 @@ static int igt_fill_blt(void *arg)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_fill_blt(obj, ce, val);
mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
@ -166,9 +164,7 @@ static int igt_copy_blt(void *arg)
if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
dst->cache_dirty = true;
mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_copy_blt(src, dst, ce);
mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;

drivers/gpu/drm/i915/gt/intel_gt_pm.c

@ -196,26 +196,14 @@ int intel_gt_resume(struct intel_gt *gt)
static void wait_for_idle(struct intel_gt *gt)
{
mutex_lock(&gt->i915->drm.struct_mutex); /* XXX */
do {
if (i915_gem_wait_for_idle(gt->i915,
I915_WAIT_LOCKED,
I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* XXX hide warning from gem_eio */
if (i915_modparams.reset) {
dev_err(gt->i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
GEM_TRACE_DUMP();
}
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
intel_gt_set_wedged(gt);
}
} while (i915_retire_requests(gt->i915));
mutex_unlock(&gt->i915->drm.struct_mutex);
if (i915_gem_wait_for_idle(gt->i915, 0,
I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
intel_gt_set_wedged(gt);
}
intel_gt_pm_wait_for_idle(gt);
}

drivers/gpu/drm/i915/gt/selftest_context.c

@ -318,7 +318,7 @@ static int live_active_context(void *arg)
if (err)
break;
err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
err = igt_flush_test(gt->i915);
if (err)
break;
}
@ -431,7 +431,7 @@ static int live_remote_context(void *arg)
if (err)
break;
err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
err = igt_flush_test(gt->i915);
if (err)
break;
}

drivers/gpu/drm/i915/gt/selftest_hangcheck.c

@ -58,7 +58,9 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
memset(h, 0, sizeof(*h));
h->gt = gt;
mutex_lock(&gt->i915->drm.struct_mutex);
h->ctx = kernel_context(gt->i915);
mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(h->ctx))
return PTR_ERR(h->ctx);
@ -285,7 +287,7 @@ static void hang_fini(struct hang *h)
kernel_context_close(h->ctx);
igt_flush_test(h->gt->i915, I915_WAIT_LOCKED);
igt_flush_test(h->gt->i915);
}
static bool wait_until_running(struct hang *h, struct i915_request *rq)
@ -309,10 +311,9 @@ static int igt_hang_sanitycheck(void *arg)
/* Basic check that we can execute our hanging batch */
mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
return err;
for_each_engine(engine, gt->i915, id) {
struct intel_wedge_me w;
@ -355,8 +356,6 @@ static int igt_hang_sanitycheck(void *arg)
fini:
hang_fini(&h);
unlock:
mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@ -395,8 +394,6 @@ static int igt_reset_nop(void *arg)
reset_count = i915_reset_count(global);
count = 0;
do {
mutex_lock(&gt->i915->drm.struct_mutex);
for_each_engine(engine, gt->i915, id) {
int i;
@ -417,7 +414,6 @@ static int igt_reset_nop(void *arg)
intel_gt_reset(gt, ALL_ENGINES, NULL);
igt_global_reset_unlock(gt);
mutex_unlock(&gt->i915->drm.struct_mutex);
if (intel_gt_is_wedged(gt)) {
err = -EIO;
break;
@ -429,16 +425,13 @@ static int igt_reset_nop(void *arg)
break;
}
err = igt_flush_test(gt->i915, 0);
err = igt_flush_test(gt->i915);
if (err)
break;
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
mutex_lock(&gt->i915->drm.struct_mutex);
err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
mutex_unlock(&gt->i915->drm.struct_mutex);
err = igt_flush_test(gt->i915);
out:
mock_file_free(gt->i915, file);
if (intel_gt_is_wedged(gt))
@ -494,7 +487,6 @@ static int igt_reset_nop_engine(void *arg)
break;
}
mutex_lock(&gt->i915->drm.struct_mutex);
for (i = 0; i < 16; i++) {
struct i915_request *rq;
@ -507,7 +499,6 @@ static int igt_reset_nop_engine(void *arg)
i915_request_add(rq);
}
err = intel_engine_reset(engine, NULL);
mutex_unlock(&gt->i915->drm.struct_mutex);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
@ -533,15 +524,12 @@ static int igt_reset_nop_engine(void *arg)
if (err)
break;
err = igt_flush_test(gt->i915, 0);
err = igt_flush_test(gt->i915);
if (err)
break;
}
mutex_lock(&gt->i915->drm.struct_mutex);
err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
mutex_unlock(&gt->i915->drm.struct_mutex);
err = igt_flush_test(gt->i915);
out:
mock_file_free(gt->i915, file);
if (intel_gt_is_wedged(gt))
@ -563,9 +551,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
return 0;
if (active) {
mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
}
@ -593,17 +579,14 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (active) {
struct i915_request *rq;
mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@ -647,7 +630,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (err)
break;
err = igt_flush_test(gt->i915, 0);
err = igt_flush_test(gt->i915);
if (err)
break;
}
@ -655,11 +638,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (intel_gt_is_wedged(gt))
err = -EIO;
if (active) {
mutex_lock(&gt->i915->drm.struct_mutex);
if (active)
hang_fini(&h);
mutex_unlock(&gt->i915->drm.struct_mutex);
}
return err;
}
@ -741,10 +721,8 @@ static int active_engine(void *data)
struct i915_request *old = rq[idx];
struct i915_request *new;
mutex_lock(&engine->i915->drm.struct_mutex);
new = igt_request_alloc(ctx[idx], engine);
if (IS_ERR(new)) {
mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
break;
}
@ -755,7 +733,6 @@ static int active_engine(void *data)
rq[idx] = i915_request_get(new);
i915_request_add(new);
mutex_unlock(&engine->i915->drm.struct_mutex);
err = active_request_put(old);
if (err)
@ -795,9 +772,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
return 0;
if (flags & TEST_ACTIVE) {
mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
@ -855,17 +830,14 @@ static int __igt_reset_engines(struct intel_gt *gt,
struct i915_request *rq = NULL;
if (flags & TEST_ACTIVE) {
mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@ -977,9 +949,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
if (err)
break;
mutex_lock(&gt->i915->drm.struct_mutex);
err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
mutex_unlock(&gt->i915->drm.struct_mutex);
err = igt_flush_test(gt->i915);
if (err)
break;
}
@ -987,11 +957,8 @@ static int __igt_reset_engines(struct intel_gt *gt,
if (intel_gt_is_wedged(gt))
err = -EIO;
if (flags & TEST_ACTIVE) {
mutex_lock(&gt->i915->drm.struct_mutex);
if (flags & TEST_ACTIVE)
hang_fini(&h);
mutex_unlock(&gt->i915->drm.struct_mutex);
}
return err;
}
@ -1061,7 +1028,6 @@ static int igt_reset_wait(void *arg)
igt_global_reset_lock(gt);
mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
@ -1109,7 +1075,6 @@ static int igt_reset_wait(void *arg)
fini:
hang_fini(&h);
unlock:
mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@ -1189,10 +1154,9 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
/* Check that we can recover an unbind stuck on a hanging request */
mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
return err;
obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
if (IS_ERR(obj)) {
@ -1255,8 +1219,6 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
if (err)
goto out_rq;
mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@ -1305,16 +1267,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
put_task_struct(tsk);
}
mutex_lock(&gt->i915->drm.struct_mutex);
out_rq:
i915_request_put(rq);
out_obj:
i915_gem_object_put(obj);
fini:
hang_fini(&h);
unlock:
mutex_unlock(&gt->i915->drm.struct_mutex);
if (intel_gt_is_wedged(gt))
return -EIO;
@ -1396,7 +1354,6 @@ static int igt_reset_queue(void *arg)
igt_global_reset_lock(gt);
mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
@ -1511,7 +1468,7 @@ static int igt_reset_queue(void *arg)
i915_request_put(prev);
err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
err = igt_flush_test(gt->i915);
if (err)
break;
}
@ -1519,7 +1476,6 @@ static int igt_reset_queue(void *arg)
fini:
hang_fini(&h);
unlock:
mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@ -1546,11 +1502,9 @@ static int igt_handle_error(void *arg)
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto err_unlock;
return err;
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
@ -1574,8 +1528,6 @@ static int igt_handle_error(void *arg)
goto err_request;
}
mutex_unlock(&gt->i915->drm.struct_mutex);
/* Temporarily disable error capture */
error = xchg(&global->first_error, (void *)-1);
@ -1583,8 +1535,6 @@ static int igt_handle_error(void *arg)
xchg(&global->first_error, error);
mutex_lock(&gt->i915->drm.struct_mutex);
if (rq->fence.error != -EIO) {
pr_err("Guilty request not identified!\n");
err = -EINVAL;
@ -1595,8 +1545,6 @@ static int igt_handle_error(void *arg)
i915_request_put(rq);
err_fini:
hang_fini(&h);
err_unlock:
mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@ -1689,7 +1637,6 @@ static int igt_reset_engines_atomic(void *arg)
return 0;
igt_global_reset_lock(gt);
mutex_lock(&gt->i915->drm.struct_mutex);
/* Flush any requests before we get started and check basics */
if (!igt_force_reset(gt))
@ -1709,9 +1656,7 @@ static int igt_reset_engines_atomic(void *arg)
out:
/* As we poke around the guts, do a full reset before continuing. */
igt_force_reset(gt);
unlock:
mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
return err;
@ -1751,10 +1696,6 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
err = intel_gt_live_subtests(tests, gt);
mutex_lock(&gt->i915->drm.struct_mutex);
igt_flush_test(gt->i915, I915_WAIT_LOCKED);
mutex_unlock(&gt->i915->drm.struct_mutex);
i915_modparams.enable_hangcheck = saved_hangcheck;
intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);

drivers/gpu/drm/i915/gt/selftest_lrc.c

@ -61,7 +61,7 @@ static int live_sanitycheck(void *arg)
}
igt_spinner_end(&spin);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
if (igt_flush_test(i915)) {
err = -EIO;
goto err_ctx;
}
@ -384,8 +384,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
if (err)
goto out;
if (i915_request_wait(head,
I915_WAIT_LOCKED,
if (i915_request_wait(head, 0,
2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
count, n);
@ -457,7 +456,7 @@ static int live_timeslice_preempt(void *arg)
if (err)
goto err_pin;
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
if (igt_flush_test(i915)) {
err = -EIO;
goto err_pin;
}
@ -1010,7 +1009,7 @@ static int live_nopreempt(void *arg)
goto err_wedged;
}
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
goto err_wedged;
}
@ -1075,7 +1074,7 @@ static int live_suppress_self_preempt(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
goto err_wedged;
intel_engine_pm_get(engine);
@ -1136,7 +1135,7 @@ static int live_suppress_self_preempt(void *arg)
}
intel_engine_pm_put(engine);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
goto err_wedged;
}
@ -1297,7 +1296,7 @@ static int live_suppress_wait_preempt(void *arg)
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
goto err_wedged;
if (engine->execlists.preempt_hang.count) {
@ -1576,7 +1575,7 @@ static int live_preempt_hang(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
if (igt_flush_test(i915)) {
err = -EIO;
goto err_ctx_lo;
}
@ -1973,7 +1972,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
prime, div64_u64(ktime_to_ns(times[1]), prime));
out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
@ -2118,7 +2117,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
goto out;
out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
for (n = 0; n < nsibling; n++)
@ -2296,7 +2295,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
out:
for (n = 0; !IS_ERR(rq[n]); n++)
i915_request_put(rq[n]);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
kernel_context_close(ctx);

drivers/gpu/drm/i915/gt/selftest_timeline.c

@ -6,7 +6,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "../selftests/i915_random.h"
@ -136,7 +136,6 @@ static int mock_hwsp_freelist(void *arg)
goto err_put;
}
mutex_lock(&state.i915->drm.struct_mutex);
for (p = phases; p->name; p++) {
pr_debug("%s(%s)\n", __func__, p->name);
for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
@ -149,7 +148,6 @@ static int mock_hwsp_freelist(void *arg)
out:
for (na = 0; na < state.max; na++)
__mock_hwsp_record(&state, na, NULL);
mutex_unlock(&state.i915->drm.struct_mutex);
kfree(state.history);
err_put:
drm_dev_put(&state.i915->drm);
@ -449,8 +447,6 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
struct i915_request *rq;
int err;
lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
err = intel_timeline_pin(tl);
if (err) {
rq = ERR_PTR(err);
@ -461,10 +457,14 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
if (IS_ERR(rq))
goto out_unpin;
i915_request_get(rq);
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
i915_request_add(rq);
if (err)
if (err) {
i915_request_put(rq);
rq = ERR_PTR(err);
}
out_unpin:
intel_timeline_unpin(tl);
@ -500,7 +500,6 @@ static int live_hwsp_engine(void *arg)
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
@ -515,14 +514,13 @@ static int live_hwsp_engine(void *arg)
if (!timelines)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
if (!intel_engine_can_store_dword(engine))
continue;
intel_engine_pm_get(engine);
for (n = 0; n < NUM_TIMELINES; n++) {
struct intel_timeline *tl;
struct i915_request *rq;
@ -530,22 +528,26 @@ static int live_hwsp_engine(void *arg)
tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
goto out;
break;
}
timelines[count++] = tl;
i915_request_put(rq);
}
intel_engine_pm_put(engine);
if (err)
break;
}
out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
for (n = 0; n < count; n++) {
@ -559,11 +561,7 @@ static int live_hwsp_engine(void *arg)
intel_timeline_put(tl);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kvfree(timelines);
return err;
#undef NUM_TIMELINES
}
@ -575,7 +573,6 @@ static int live_hwsp_alternate(void *arg)
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
@ -591,9 +588,6 @@ static int live_hwsp_alternate(void *arg)
if (!timelines)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
for_each_engine(engine, i915, id) {
@ -605,11 +599,14 @@ static int live_hwsp_alternate(void *arg)
tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
intel_engine_pm_put(engine);
err = PTR_ERR(tl);
goto out;
}
intel_engine_pm_get(engine);
rq = tl_write(tl, engine, count);
intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
@ -617,11 +614,12 @@ static int live_hwsp_alternate(void *arg)
}
timelines[count++] = tl;
i915_request_put(rq);
}
}
out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
for (n = 0; n < count; n++) {
@ -635,11 +633,7 @@ static int live_hwsp_alternate(void *arg)
intel_timeline_put(tl);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kvfree(timelines);
return err;
#undef NUM_TIMELINES
}
@ -650,7 +644,6 @@ static int live_hwsp_wrap(void *arg)
struct intel_engine_cs *engine;
struct intel_timeline *tl;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = 0;
/*
@ -658,14 +651,10 @@ static int live_hwsp_wrap(void *arg)
* foreign GPU references.
*/
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
tl = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out_rpm;
}
if (IS_ERR(tl))
return PTR_ERR(tl);
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
goto out_free;
@ -681,7 +670,9 @@ static int live_hwsp_wrap(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
intel_engine_pm_get(engine);
rq = i915_request_create(engine->kernel_context);
intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
@ -747,16 +738,12 @@ static int live_hwsp_wrap(void *arg)
}
out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
intel_timeline_unpin(tl);
out_free:
intel_timeline_put(tl);
out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -765,7 +752,6 @@ static int live_hwsp_recycle(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
unsigned long count;
int err = 0;
@ -775,9 +761,6 @@ static int live_hwsp_recycle(void *arg)
* want to confuse ourselves or the GPU.
*/
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
@ -785,6 +768,8 @@ static int live_hwsp_recycle(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
intel_engine_pm_get(engine);
do {
struct intel_timeline *tl;
struct i915_request *rq;
@ -792,21 +777,22 @@ static int live_hwsp_recycle(void *arg)
tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
goto out;
break;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
i915_request_put(rq);
intel_timeline_put(tl);
err = -EIO;
goto out;
break;
}
if (*tl->hwsp_seqno != count) {
@ -815,17 +801,18 @@ static int live_hwsp_recycle(void *arg)
err = -EINVAL;
}
i915_request_put(rq);
intel_timeline_put(tl);
count++;
if (err)
goto out;
break;
} while (!__igt_timeout(end_time, NULL));
}
out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
intel_engine_pm_put(engine);
if (err)
break;
}
return err;
}

drivers/gpu/drm/i915/gt/selftest_workarounds.c

@ -676,7 +676,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
break;
}
if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
if (igt_flush_test(ctx->i915))
err = -EIO;
out_batch:
i915_vma_unpin_and_release(&batch, 0);
@ -1090,7 +1090,7 @@ static int live_isolated_whitelist(void *arg)
kernel_context_close(client[i].ctx);
}
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
return err;
@ -1248,7 +1248,7 @@ live_engine_reset_workarounds(void *arg)
igt_global_reset_unlock(&i915->gt);
kernel_context_close(ctx);
igt_flush_test(i915, I915_WAIT_LOCKED);
igt_flush_test(i915);
return ret;
}

drivers/gpu/drm/i915/i915_debugfs.c

@ -3621,6 +3621,7 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
int ret;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
@ -3630,40 +3631,21 @@ i915_drop_caches_set(void *data, u64 val)
I915_IDLE_ENGINES_TIMEOUT))
intel_gt_set_wedged(&i915->gt);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
int ret;
if (val & DROP_RETIRE)
i915_retire_requests(i915);
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (val & (DROP_IDLE | DROP_ACTIVE)) {
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
}
/*
* To finish the flush of the idle_worker, we must complete
* the switch-to-kernel-context, which requires a double
* pass through wait_for_idle: first queues the switch,
* second waits for the switch.
*/
if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (ret == 0 && val & DROP_IDLE)
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (val & DROP_RETIRE)
i915_retire_requests(i915);
mutex_unlock(&i915->drm.struct_mutex);
if (ret == 0 && val & DROP_IDLE)
ret = intel_gt_pm_wait_for_idle(&i915->gt);
if (val & DROP_IDLE) {
ret = intel_gt_pm_wait_for_idle(&i915->gt);
if (ret)
return ret;
}
if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))

drivers/gpu/drm/i915/i915_gem.c

@ -945,19 +945,16 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
if (!intel_gt_pm_is_awake(gt))
return 0;
GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
do {
timeout = wait_for_timelines(gt, flags, timeout);
if (timeout < 0)
return timeout;
timeout = wait_for_timelines(gt, flags, timeout);
if (timeout < 0)
return timeout;
cond_resched();
if (signal_pending(current))
return -EINTR;
if (flags & I915_WAIT_LOCKED) {
lockdep_assert_held(&i915->drm.struct_mutex);
i915_retire_requests(i915);
}
} while (i915_retire_requests(i915));
return 0;
}

drivers/gpu/drm/i915/i915_request.h

@ -308,10 +308,9 @@ long i915_request_wait(struct i915_request *rq,
long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
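With I915_WAIT_LOCKED gone, the surviving wait flags are simply repacked into the low bits; their meanings are unchanged. A hedged sketch of a waiter after this patch (rq is assumed to be a struct i915_request already held by the caller):

	long timeout;

	/* Wait interruptibly; with MAX_SCHEDULE_TIMEOUT the only
	 * negative return is -EINTR, never -ETIME. */
	timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;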
static inline bool i915_request_signaled(const struct i915_request *rq)
{

drivers/gpu/drm/i915/selftests/i915_active.c

@ -162,10 +162,8 @@ static int live_active_wait(void *arg)
__live_put(active);
mutex_lock(&i915->drm.struct_mutex);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -183,10 +181,8 @@ static int live_active_retire(void *arg)
return PTR_ERR(active);
/* waits for & retires all requests */
mutex_lock(&i915->drm.struct_mutex);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after flushing!\n");

drivers/gpu/drm/i915/selftests/i915_gem_evict.c

@ -523,7 +523,7 @@ static int igt_evict_contexts(void *arg)
mutex_lock(&i915->ggtt.vm.mutex);
out_locked:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
while (reserved) {
struct reserved *next = reserved->next;

drivers/gpu/drm/i915/selftests/i915_gem_gtt.c

@ -1705,12 +1705,8 @@ int i915_gem_gtt_mock_selftests(void)
err = i915_subtests(tests, ggtt);
mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
@ -2006,7 +2002,7 @@ static int igt_cs_tlb(void *arg)
}
}
end:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
i915_gem_context_unlock_engines(ctx);
i915_gem_object_unpin_map(out);

drivers/gpu/drm/i915/selftests/i915_request.c

@ -41,21 +41,16 @@ static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_request *request;
int err = -ENOMEM;
/* Basic preliminary test to create a request and let it loose! */
mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
if (!request)
goto out_unlock;
return -ENOMEM;
i915_request_add(request);
err = 0;
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return err;
return 0;
}
static int igt_wait_request(void *arg)
@ -67,12 +62,10 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_unlock;
}
if (!request)
return -ENOMEM;
i915_request_get(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
@ -125,9 +118,7 @@ static int igt_wait_request(void *arg)
err = 0;
out_request:
i915_request_put(request);
out_unlock:
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -140,52 +131,45 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_locked;
}
if (!request)
return -ENOMEM;
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
goto out_locked;
goto out;
}
i915_request_add(request);
mutex_unlock(&i915->drm.struct_mutex);
if (dma_fence_is_signaled(&request->fence)) {
pr_err("fence signaled immediately!\n");
goto out_device;
goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
pr_err("fence wait success after submit (expected timeout)!\n");
goto out_device;
goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out (expected success)!\n");
goto out_device;
goto out;
}
if (!dma_fence_is_signaled(&request->fence)) {
pr_err("fence unsignaled after waiting!\n");
goto out_device;
goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out when complete (expected success)!\n");
goto out_device;
goto out;
}
err = 0;
out_device:
mutex_lock(&i915->drm.struct_mutex);
out_locked:
out:
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -199,6 +183,8 @@ static int igt_request_rewind(void *arg)
mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
mutex_unlock(&i915->drm.struct_mutex);
ce = i915_gem_context_get_engine(ctx[0], RCS0);
GEM_BUG_ON(IS_ERR(ce));
request = mock_request(ce, 2 * HZ);
@ -211,7 +197,10 @@ static int igt_request_rewind(void *arg)
i915_request_get(request);
i915_request_add(request);
mutex_lock(&i915->drm.struct_mutex);
ctx[1] = mock_context(i915, "B");
mutex_unlock(&i915->drm.struct_mutex);
ce = i915_gem_context_get_engine(ctx[1], RCS0);
GEM_BUG_ON(IS_ERR(ce));
vip = mock_request(ce, 0);
@ -233,7 +222,6 @@ static int igt_request_rewind(void *arg)
request->engine->submit_request(request);
rcu_read_unlock();
mutex_unlock(&i915->drm.struct_mutex);
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
pr_err("timed out waiting for high priority request\n");
@ -248,14 +236,12 @@ static int igt_request_rewind(void *arg)
err = 0;
err:
i915_request_put(vip);
mutex_lock(&i915->drm.struct_mutex);
err_context_1:
mock_context_close(ctx[1]);
i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -282,7 +268,6 @@ __live_request_alloc(struct intel_context *ce)
static int __igt_breadcrumbs_smoketest(void *arg)
{
struct smoketest *t = arg;
struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
const unsigned int total = 4 * t->ncontexts + 1;
unsigned int num_waits = 0, num_fences = 0;
@ -337,14 +322,11 @@ static int __igt_breadcrumbs_smoketest(void *arg)
struct i915_request *rq;
struct intel_context *ce;
mutex_lock(BKL);
ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
rq = t->request_alloc(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
mutex_unlock(BKL);
err = PTR_ERR(rq);
count = n;
break;
@ -357,8 +339,6 @@ static int __igt_breadcrumbs_smoketest(void *arg)
requests[n] = i915_request_get(rq);
i915_request_add(rq);
mutex_unlock(BKL);
if (err >= 0)
err = i915_sw_fence_await_dma_fence(wait,
&rq->fence,
@ -457,15 +437,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
goto out_threads;
}
mutex_lock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < t.ncontexts; n++) {
mutex_lock(&t.engine->i915->drm.struct_mutex);
t.contexts[n] = mock_context(t.engine->i915, "mock");
mutex_unlock(&t.engine->i915->drm.struct_mutex);
if (!t.contexts[n]) {
ret = -ENOMEM;
goto out_contexts;
}
}
mutex_unlock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < ncpus; n++) {
threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
@ -495,18 +475,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
atomic_long_read(&t.num_fences),
ncpus);
mutex_lock(&t.engine->i915->drm.struct_mutex);
out_contexts:
for (n = 0; n < t.ncontexts; n++) {
if (!t.contexts[n])
break;
mock_context_close(t.contexts[n]);
}
mutex_unlock(&t.engine->i915->drm.struct_mutex);
kfree(t.contexts);
out_threads:
kfree(threads);
return ret;
}
@ -539,7 +516,6 @@ static int live_nop_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
struct igt_live_test t;
unsigned int id;
int err = -ENODEV;
@ -549,28 +525,25 @@ static int live_nop_request(void *arg)
* the overhead of submitting requests to the hardware.
*/
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_engine(engine, i915, id) {
struct i915_request *request = NULL;
unsigned long n, prime;
IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
return err;
for_each_prime_number_from(prime, 1, 8192) {
struct i915_request *request = NULL;
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
i915_request_put(request);
request = i915_request_create(engine->kernel_context);
if (IS_ERR(request)) {
err = PTR_ERR(request);
goto out_unlock;
}
if (IS_ERR(request))
return PTR_ERR(request);
/* This space is left intentionally blank.
*
@ -585,9 +558,11 @@ static int live_nop_request(void *arg)
* for latency.
*/
i915_request_get(request);
i915_request_add(request);
}
i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
i915_request_put(request);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@ -599,7 +574,7 @@ static int live_nop_request(void *arg)
err = igt_live_test_end(&t);
if (err)
goto out_unlock;
return err;
pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@ -607,9 +582,6 @@ static int live_nop_request(void *arg)
prime, div64_u64(ktime_to_ns(times[1]), prime));
}
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -679,6 +651,7 @@ empty_request(struct intel_engine_cs *engine,
if (err)
goto out_request;
i915_request_get(request);
out_request:
i915_request_add(request);
return err ? ERR_PTR(err) : request;
@ -688,7 +661,6 @@ static int live_empty_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
struct igt_live_test t;
struct i915_vma *batch;
unsigned int id;
@ -699,14 +671,9 @@ static int live_empty_request(void *arg)
* the overhead of submitting requests to the hardware.
*/
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
batch = empty_batch(i915);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_unlock;
}
if (IS_ERR(batch))
return PTR_ERR(batch);
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
@ -730,6 +697,7 @@ static int live_empty_request(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
i915_request_put(request);
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
@ -745,6 +713,7 @@ static int live_empty_request(void *arg)
if (__igt_timeout(end_time, NULL))
break;
}
i915_request_put(request);
err = igt_live_test_end(&t);
if (err)
@ -759,9 +728,6 @@ static int live_empty_request(void *arg)
out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -841,7 +807,6 @@ static int live_all_engines(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_request *request[I915_NUM_ENGINES];
intel_wakeref_t wakeref;
struct igt_live_test t;
struct i915_vma *batch;
unsigned int id;
@ -852,18 +817,15 @@ static int live_all_engines(void *arg)
* block doing so, and that they don't complete too soon.
*/
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
return err;
batch = recursive_batch(i915);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
goto out_unlock;
return err;
}
for_each_engine(engine, i915, id) {
@ -933,9 +895,6 @@ static int live_all_engines(void *arg)
i915_request_put(request[id]);
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -945,7 +904,6 @@ static int live_sequential_engines(void *arg)
struct i915_request *request[I915_NUM_ENGINES] = {};
struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
struct igt_live_test t;
unsigned int id;
int err;
@ -956,12 +914,9 @@ static int live_sequential_engines(void *arg)
* they are running on independent engines.
*/
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
return err;
for_each_engine(engine, i915, id) {
struct i915_vma *batch;
@ -971,7 +926,7 @@ static int live_sequential_engines(void *arg)
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch for %s, err=%d\n",
__func__, engine->name, err);
goto out_unlock;
return err;
}
request[id] = i915_request_create(engine->kernel_context);
@ -1063,9 +1018,6 @@ static int live_sequential_engines(void *arg)
i915_vma_put(request[id]->batch);
i915_request_put(request[id]);
}
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@ -1080,16 +1032,12 @@ static int __live_parallel_engine1(void *arg)
struct i915_request *rq;
int err;
mutex_lock(&engine->i915->drm.struct_mutex);
rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
mutex_unlock(&engine->i915->drm.struct_mutex);
if (IS_ERR(rq))
return PTR_ERR(rq);
}
i915_request_get(rq);
i915_request_add(rq);
mutex_unlock(&engine->i915->drm.struct_mutex);
err = 0;
if (i915_request_wait(rq, 0, HZ / 5) < 0)
@ -1115,16 +1063,11 @@ static int __live_parallel_engineN(void *arg)
do {
struct i915_request *rq;
mutex_lock(&engine->i915->drm.struct_mutex);
rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
mutex_unlock(&engine->i915->drm.struct_mutex);
if (IS_ERR(rq))
return PTR_ERR(rq);
}
i915_request_add(rq);
mutex_unlock(&engine->i915->drm.struct_mutex);
count++;
} while (!__igt_timeout(end_time, NULL));
@ -1154,9 +1097,7 @@ static int live_parallel_engines(void *arg)
struct task_struct *tsk[I915_NUM_ENGINES] = {};
struct igt_live_test t;
mutex_lock(&i915->drm.struct_mutex);
err = igt_live_test_begin(&t, i915, __func__, "");
mutex_unlock(&i915->drm.struct_mutex);
if (err)
break;
@ -1184,10 +1125,8 @@ static int live_parallel_engines(void *arg)
put_task_struct(tsk[id]);
}
mutex_lock(&i915->drm.struct_mutex);
if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
}
return err;
@ -1280,9 +1219,10 @@ static int live_breadcrumbs_smoketest(void *arg)
goto out_threads;
}
mutex_lock(&i915->drm.struct_mutex);
for (n = 0; n < t[0].ncontexts; n++) {
mutex_lock(&i915->drm.struct_mutex);
t[0].contexts[n] = live_context(i915, file);
mutex_unlock(&i915->drm.struct_mutex);
if (!t[0].contexts[n]) {
ret = -ENOMEM;
goto out_contexts;
@ -1299,7 +1239,6 @@ static int live_breadcrumbs_smoketest(void *arg)
t[id].max_batch = max_batches(t[0].contexts[0], engine);
if (t[id].max_batch < 0) {
ret = t[id].max_batch;
mutex_unlock(&i915->drm.struct_mutex);
goto out_flush;
}
/* One ring interleaved between requests from all cpus */
@ -1314,7 +1253,6 @@ static int live_breadcrumbs_smoketest(void *arg)
&t[id], "igt/%d.%d", id, n);
if (IS_ERR(tsk)) {
ret = PTR_ERR(tsk);
mutex_unlock(&i915->drm.struct_mutex);
goto out_flush;
}
@ -1322,7 +1260,6 @@ static int live_breadcrumbs_smoketest(void *arg)
threads[id * ncpus + n] = tsk;
}
}
mutex_unlock(&i915->drm.struct_mutex);
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
@ -1350,10 +1287,8 @@ static int live_breadcrumbs_smoketest(void *arg)
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
mutex_lock(&i915->drm.struct_mutex);
ret = igt_live_test_end(&live) ?: ret;
out_contexts:
mutex_unlock(&i915->drm.struct_mutex);
kfree(t[0].contexts);
out_threads:
kfree(threads);

drivers/gpu/drm/i915/selftests/i915_selftest.c

@ -263,10 +263,8 @@ int __i915_live_teardown(int err, void *data)
{
struct drm_i915_private *i915 = data;
mutex_lock(&i915->drm.struct_mutex);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
@ -284,10 +282,8 @@ int __intel_gt_live_teardown(int err, void *data)
{
struct intel_gt *gt = data;
mutex_lock(&gt->i915->drm.struct_mutex);
if (igt_flush_test(gt->i915, I915_WAIT_LOCKED))
if (igt_flush_test(gt->i915))
err = -EIO;
mutex_unlock(&gt->i915->drm.struct_mutex);
i915_gem_drain_freed_objects(gt->i915);

drivers/gpu/drm/i915/selftests/i915_vma.c

@ -833,12 +833,8 @@ int i915_vma_mock_selftests(void)
err = i915_subtests(tests, ggtt);
mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:

drivers/gpu/drm/i915/selftests/igt_flush_test.c

@ -12,31 +12,25 @@
#include "igt_flush_test.h"
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
int igt_flush_test(struct drm_i915_private *i915)
{
int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
int repeat = !!(flags & I915_WAIT_LOCKED);
cond_resched();
do {
if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
pr_err("%pS timed out, cancelling all further testing.\n",
__builtin_return_address(0));
i915_retire_requests(i915);
if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
pr_err("%pS timed out, cancelling all further testing.\n",
__builtin_return_address(0));
GEM_TRACE("%pS timed out.\n",
__builtin_return_address(0));
GEM_TRACE_DUMP();
GEM_TRACE("%pS timed out.\n",
__builtin_return_address(0));
GEM_TRACE_DUMP();
intel_gt_set_wedged(&i915->gt);
repeat = 0;
ret = -EIO;
}
/* Ensure we also flush after wedging. */
if (flags & I915_WAIT_LOCKED)
i915_retire_requests(i915);
} while (repeat--);
intel_gt_set_wedged(&i915->gt);
ret = -EIO;
}
i915_retire_requests(i915);
return ret;
}
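After this rewrite the helper takes no flags, always retires what it can before returning, and reports -EIO only if the GPU is (or becomes) wedged. The call sites across this series collapse to the same shape:

	/* Typical selftest epilogue after this patch. */
	if (igt_flush_test(i915))
		err = -EIO;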

drivers/gpu/drm/i915/selftests/igt_flush_test.h

@ -9,6 +9,6 @@
struct drm_i915_private;
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags);
int igt_flush_test(struct drm_i915_private *i915);
#endif /* IGT_FLUSH_TEST_H */

drivers/gpu/drm/i915/selftests/igt_live_test.c

@ -19,15 +19,12 @@ int igt_live_test_begin(struct igt_live_test *t,
enum intel_engine_id id;
int err;
lockdep_assert_held(&i915->drm.struct_mutex);
t->i915 = i915;
t->func = func;
t->name = name;
err = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!",
@ -50,9 +47,7 @@ int igt_live_test_end(struct igt_live_test *t)
struct intel_engine_cs *engine;
enum intel_engine_id id;
lockdep_assert_held(&i915->drm.struct_mutex);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
if (igt_flush_test(i915))
return -EIO;
if (t->reset_global != i915_reset_count(&i915->gpu_error)) {

drivers/gpu/drm/i915/selftests/mock_gem_device.c

@ -41,8 +41,6 @@ void mock_device_flush(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
lockdep_assert_held(&i915->drm.struct_mutex);
do {
for_each_engine(engine, i915, id)
mock_engine_flush(engine);
@ -55,9 +53,7 @@ static void mock_device_release(struct drm_device *dev)
struct intel_engine_cs *engine;
enum intel_engine_id id;
mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
flush_work(&i915->gem.idle_work);
i915_gem_drain_workqueue(i915);