/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
2017-02-14 00:15:24 +07:00
|
|
|
#include <linux/prime_numbers.h>
|
|
|
|
|
2017-02-14 00:15:21 +07:00
|
|
|
#include "../i915_selftest.h"
|
|
|
|
|
2017-02-23 14:44:18 +07:00
|
|
|
#include "mock_context.h"
|
2017-02-14 00:15:21 +07:00
|
|
|
#include "mock_gem_device.h"
|
|
|
|
|
|
|
|
static int igt_add_request(void *arg)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = arg;
|
2018-02-21 16:56:36 +07:00
|
|
|
struct i915_request *request;
|
2017-02-14 00:15:21 +07:00
|
|
|
int err = -ENOMEM;
|
|
|
|
|
|
|
|
/* Basic preliminary test to create a request and let it loose! */
|
|
|
|
|
|
|
|
mutex_lock(&i915->drm.struct_mutex);
|
|
|
|
request = mock_request(i915->engine[RCS],
|
|
|
|
i915->kernel_context,
|
|
|
|
HZ / 10);
|
|
|
|
if (!request)
|
|
|
|
goto out_unlock;
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_add(request);
|
2017-02-14 00:15:21 +07:00
|
|
|
|
|
|
|
err = 0;
|
|
|
|
out_unlock:
|
|
|
|
mutex_unlock(&i915->drm.struct_mutex);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-02-14 00:15:22 +07:00
|
|
|
/*
 * igt_wait_request - exercise i915_request_wait() around submission.
 *
 * Creates a mock request with a delay of T jiffies and checks the wait
 * semantics at each stage: before submission every wait (including the
 * zero-timeout busy query) must report -ETIME; after submission waits
 * shorter than the delay must still time out; once the delay elapses the
 * wait must succeed, and further waits on the completed request must
 * return immediately.  Returns 0 on success or a negative errno.
 */
static int igt_wait_request(void *arg)
{
	const long T = HZ / 4; /* mock request delay, also our wait budget */
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	/* Zero timeout is a busy query: must be -ETIME while unsubmitted */
	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_request_add(request);

	/* Submitted, but the T jiffies delay has not yet elapsed */
	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	/* Waiting for less than the mock delay must still time out */
	if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	/* By now ~T has passed in total, so a full T wait must succeed */
	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	/* Waiting on an already completed request must return at once */
	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
|
|
|
|
|
2017-02-14 00:15:23 +07:00
|
|
|
/*
 * igt_fence_wait - exercise the dma_fence interface of a request.
 *
 * Creates a mock request with a delay of T jiffies and waits on its
 * embedded dma_fence: waits must time out before submission and before
 * the delay elapses, and must succeed (with the fence signaled) once the
 * request completes.  Returns 0 on success or a negative errno.
 */
static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4; /* mock request delay */
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	/* Submission requires struct_mutex; reacquire just for the add */
	mutex_lock(&i915->drm.struct_mutex);
	i915_request_add(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	/* Half the mock delay is not enough; the fence must still be busy */
	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	/* ~T has now elapsed, so this wait must report completion (>0) */
	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	/* A wait on a signaled fence must succeed immediately */
	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
|
|
|
|
|
2017-02-23 14:44:18 +07:00
|
|
|
static int igt_request_rewind(void *arg)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = arg;
|
2018-02-21 16:56:36 +07:00
|
|
|
struct i915_request *request, *vip;
|
2017-02-23 14:44:18 +07:00
|
|
|
struct i915_gem_context *ctx[2];
|
|
|
|
int err = -EINVAL;
|
|
|
|
|
|
|
|
mutex_lock(&i915->drm.struct_mutex);
|
|
|
|
ctx[0] = mock_context(i915, "A");
|
|
|
|
request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
|
|
|
|
if (!request) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_context_0;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_get(request);
|
|
|
|
i915_request_add(request);
|
2017-02-23 14:44:18 +07:00
|
|
|
|
|
|
|
ctx[1] = mock_context(i915, "B");
|
|
|
|
vip = mock_request(i915->engine[RCS], ctx[1], 0);
|
|
|
|
if (!vip) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_context_1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Simulate preemption by manual reordering */
|
|
|
|
if (!mock_cancel_request(request)) {
|
|
|
|
pr_err("failed to cancel request (already executed)!\n");
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_add(vip);
|
2017-02-23 14:44:18 +07:00
|
|
|
goto err_context_1;
|
|
|
|
}
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_get(vip);
|
|
|
|
i915_request_add(vip);
|
drm/i915: Use rcu instead of stop_machine in set_wedged
stop_machine is not really a locking primitive we should use, except
when the hw folks tell us the hw is broken and that's the only way to
work around it.
This patch tries to address the locking abuse of stop_machine() from
commit 20e4933c478a1ca694b38fa4ac44d99e659941f5
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue Nov 22 14:41:21 2016 +0000
drm/i915: Stop the machine as we install the wedged submit_request handler
Chris said parts of the reasons for going with stop_machine() was that
it's no overhead for the fast-path. But these callbacks use irqsave
spinlocks and do a bunch of MMIO, and rcu_read_lock is _real_ fast.
To stay as close as possible to the stop_machine semantics we first
update all the submit function pointers to the nop handler, then call
synchronize_rcu() to make sure no new requests can be submitted. This
should give us exactly the huge barrier we want.
I pondered whether we should annotate engine->submit_request as __rcu
and use rcu_assign_pointer and rcu_dereference on it. But the reason
behind those is to make sure the compiler/cpu barriers are there for
when you have an actual data structure you point at, to make sure all
the writes are seen correctly on the read side. But we just have a
function pointer, and .text isn't changed, so no need for these
barriers and hence no need for annotations.
Unfortunately there's a complication with the call to
intel_engine_init_global_seqno:
- Without stop_machine we must hold the corresponding spinlock.
- Without stop_machine we must ensure that all requests are marked as
having failed with dma_fence_set_error() before we call it. That
means we need to split the nop request submission into two phases,
both synchronized with rcu:
1. Only stop submitting the requests to hw and mark them as failed.
2. After all pending requests in the scheduler/ring are suitably
marked up as failed and we can force complete them all, also force
complete by calling intel_engine_init_global_seqno().
This should fix the followwing lockdep splat:
======================================================
WARNING: possible circular locking dependency detected
4.14.0-rc3-CI-CI_DRM_3179+ #1 Tainted: G U
------------------------------------------------------
kworker/3:4/562 is trying to acquire lock:
(cpu_hotplug_lock.rw_sem){++++}, at: [<ffffffff8113d4bc>] stop_machine+0x1c/0x40
but task is already holding lock:
(&dev->struct_mutex){+.+.}, at: [<ffffffffa0136588>] i915_reset_device+0x1e8/0x260 [i915]
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #6 (&dev->struct_mutex){+.+.}:
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
__mutex_lock+0x86/0x9b0
mutex_lock_interruptible_nested+0x1b/0x20
i915_mutex_lock_interruptible+0x51/0x130 [i915]
i915_gem_fault+0x209/0x650 [i915]
__do_fault+0x1e/0x80
__handle_mm_fault+0xa08/0xed0
handle_mm_fault+0x156/0x300
__do_page_fault+0x2c5/0x570
do_page_fault+0x28/0x250
page_fault+0x22/0x30
-> #5 (&mm->mmap_sem){++++}:
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
__might_fault+0x68/0x90
_copy_to_user+0x23/0x70
filldir+0xa5/0x120
dcache_readdir+0xf9/0x170
iterate_dir+0x69/0x1a0
SyS_getdents+0xa5/0x140
entry_SYSCALL_64_fastpath+0x1c/0xb1
-> #4 (&sb->s_type->i_mutex_key#5){++++}:
down_write+0x3b/0x70
handle_create+0xcb/0x1e0
devtmpfsd+0x139/0x180
kthread+0x152/0x190
ret_from_fork+0x27/0x40
-> #3 ((complete)&req.done){+.+.}:
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
wait_for_common+0x58/0x210
wait_for_completion+0x1d/0x20
devtmpfs_create_node+0x13d/0x160
device_add+0x5eb/0x620
device_create_groups_vargs+0xe0/0xf0
device_create+0x3a/0x40
msr_device_create+0x2b/0x40
cpuhp_invoke_callback+0xc9/0xbf0
cpuhp_thread_fun+0x17b/0x240
smpboot_thread_fn+0x18a/0x280
kthread+0x152/0x190
ret_from_fork+0x27/0x40
-> #2 (cpuhp_state-up){+.+.}:
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
cpuhp_issue_call+0x133/0x1c0
__cpuhp_setup_state_cpuslocked+0x139/0x2a0
__cpuhp_setup_state+0x46/0x60
page_writeback_init+0x43/0x67
pagecache_init+0x3d/0x42
start_kernel+0x3a8/0x3fc
x86_64_start_reservations+0x2a/0x2c
x86_64_start_kernel+0x6d/0x70
verify_cpu+0x0/0xfb
-> #1 (cpuhp_state_mutex){+.+.}:
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
__mutex_lock+0x86/0x9b0
mutex_lock_nested+0x1b/0x20
__cpuhp_setup_state_cpuslocked+0x53/0x2a0
__cpuhp_setup_state+0x46/0x60
page_alloc_init+0x28/0x30
start_kernel+0x145/0x3fc
x86_64_start_reservations+0x2a/0x2c
x86_64_start_kernel+0x6d/0x70
verify_cpu+0x0/0xfb
-> #0 (cpu_hotplug_lock.rw_sem){++++}:
check_prev_add+0x430/0x840
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
cpus_read_lock+0x3d/0xb0
stop_machine+0x1c/0x40
i915_gem_set_wedged+0x1a/0x20 [i915]
i915_reset+0xb9/0x230 [i915]
i915_reset_device+0x1f6/0x260 [i915]
i915_handle_error+0x2d8/0x430 [i915]
hangcheck_declare_hang+0xd3/0xf0 [i915]
i915_hangcheck_elapsed+0x262/0x2d0 [i915]
process_one_work+0x233/0x660
worker_thread+0x4e/0x3b0
kthread+0x152/0x190
ret_from_fork+0x27/0x40
other info that might help us debug this:
Chain exists of:
cpu_hotplug_lock.rw_sem --> &mm->mmap_sem --> &dev->struct_mutex
Possible unsafe locking scenario:
CPU0 CPU1
---- ----
lock(&dev->struct_mutex);
lock(&mm->mmap_sem);
lock(&dev->struct_mutex);
lock(cpu_hotplug_lock.rw_sem);
*** DEADLOCK ***
3 locks held by kworker/3:4/562:
#0: ("events_long"){+.+.}, at: [<ffffffff8109c64a>] process_one_work+0x1aa/0x660
#1: ((&(&i915->gpu_error.hangcheck_work)->work)){+.+.}, at: [<ffffffff8109c64a>] process_one_work+0x1aa/0x660
#2: (&dev->struct_mutex){+.+.}, at: [<ffffffffa0136588>] i915_reset_device+0x1e8/0x260 [i915]
stack backtrace:
CPU: 3 PID: 562 Comm: kworker/3:4 Tainted: G U 4.14.0-rc3-CI-CI_DRM_3179+ #1
Hardware name: /NUC7i5BNB, BIOS BNKBL357.86A.0048.2017.0704.1415 07/04/2017
Workqueue: events_long i915_hangcheck_elapsed [i915]
Call Trace:
dump_stack+0x68/0x9f
print_circular_bug+0x235/0x3c0
? lockdep_init_map_crosslock+0x20/0x20
check_prev_add+0x430/0x840
? irq_work_queue+0x86/0xe0
? wake_up_klogd+0x53/0x70
__lock_acquire+0x1420/0x15e0
? __lock_acquire+0x1420/0x15e0
? lockdep_init_map_crosslock+0x20/0x20
lock_acquire+0xb0/0x200
? stop_machine+0x1c/0x40
? i915_gem_object_truncate+0x50/0x50 [i915]
cpus_read_lock+0x3d/0xb0
? stop_machine+0x1c/0x40
stop_machine+0x1c/0x40
i915_gem_set_wedged+0x1a/0x20 [i915]
i915_reset+0xb9/0x230 [i915]
i915_reset_device+0x1f6/0x260 [i915]
? gen8_gt_irq_ack+0x170/0x170 [i915]
? work_on_cpu_safe+0x60/0x60
i915_handle_error+0x2d8/0x430 [i915]
? vsnprintf+0xd1/0x4b0
? scnprintf+0x3a/0x70
hangcheck_declare_hang+0xd3/0xf0 [i915]
? intel_runtime_pm_put+0x56/0xa0 [i915]
i915_hangcheck_elapsed+0x262/0x2d0 [i915]
process_one_work+0x233/0x660
worker_thread+0x4e/0x3b0
kthread+0x152/0x190
? process_one_work+0x660/0x660
? kthread_create_on_node+0x40/0x40
ret_from_fork+0x27/0x40
Setting dangerous option reset - tainting kernel
i915 0000:00:02.0: Resetting chip after gpu hang
Setting dangerous option reset - tainting kernel
i915 0000:00:02.0: Resetting chip after gpu hang
v2: Have 1 global synchronize_rcu() barrier across all engines, and
improve commit message.
v3: We need to protect the seqno update with the timeline spinlock (in
set_wedged) to avoid racing with other updates of the seqno, like we
already do in nop_submit_request (Chris).
v4: Use two-phase sequence to plug the race Chris spotted where we can
complete requests before they're marked up with -EIO.
v5: Review from Chris:
- simplify nop_submit_request.
- Add comment to rcu_read_lock section.
- Align comments with the new style.
v6: Remove unused variable to appease CI.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102886
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103096
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marta Lofstedt <marta.lofstedt@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171011091019.1425-1-daniel.vetter@ffwll.ch
2017-10-11 16:10:19 +07:00
|
|
|
rcu_read_lock();
|
2017-02-23 14:44:18 +07:00
|
|
|
request->engine->submit_request(request);
|
drm/i915: Use rcu instead of stop_machine in set_wedged
stop_machine is not really a locking primitive we should use, except
when the hw folks tell us the hw is broken and that's the only way to
work around it.
This patch tries to address the locking abuse of stop_machine() from
commit 20e4933c478a1ca694b38fa4ac44d99e659941f5
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue Nov 22 14:41:21 2016 +0000
drm/i915: Stop the machine as we install the wedged submit_request handler
Chris said parts of the reasons for going with stop_machine() was that
it's no overhead for the fast-path. But these callbacks use irqsave
spinlocks and do a bunch of MMIO, and rcu_read_lock is _real_ fast.
To stay as close as possible to the stop_machine semantics we first
update all the submit function pointers to the nop handler, then call
synchronize_rcu() to make sure no new requests can be submitted. This
should give us exactly the huge barrier we want.
I pondered whether we should annotate engine->submit_request as __rcu
and use rcu_assign_pointer and rcu_dereference on it. But the reason
behind those is to make sure the compiler/cpu barriers are there for
when you have an actual data structure you point at, to make sure all
the writes are seen correctly on the read side. But we just have a
function pointer, and .text isn't changed, so no need for these
barriers and hence no need for annotations.
Unfortunately there's a complication with the call to
intel_engine_init_global_seqno:
- Without stop_machine we must hold the corresponding spinlock.
- Without stop_machine we must ensure that all requests are marked as
having failed with dma_fence_set_error() before we call it. That
means we need to split the nop request submission into two phases,
both synchronized with rcu:
1. Only stop submitting the requests to hw and mark them as failed.
2. After all pending requests in the scheduler/ring are suitably
marked up as failed and we can force complete them all, also force
complete by calling intel_engine_init_global_seqno().
This should fix the followwing lockdep splat:
======================================================
WARNING: possible circular locking dependency detected
4.14.0-rc3-CI-CI_DRM_3179+ #1 Tainted: G U
------------------------------------------------------
kworker/3:4/562 is trying to acquire lock:
(cpu_hotplug_lock.rw_sem){++++}, at: [<ffffffff8113d4bc>] stop_machine+0x1c/0x40
but task is already holding lock:
(&dev->struct_mutex){+.+.}, at: [<ffffffffa0136588>] i915_reset_device+0x1e8/0x260 [i915]
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #6 (&dev->struct_mutex){+.+.}:
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
__mutex_lock+0x86/0x9b0
mutex_lock_interruptible_nested+0x1b/0x20
i915_mutex_lock_interruptible+0x51/0x130 [i915]
i915_gem_fault+0x209/0x650 [i915]
__do_fault+0x1e/0x80
__handle_mm_fault+0xa08/0xed0
handle_mm_fault+0x156/0x300
__do_page_fault+0x2c5/0x570
do_page_fault+0x28/0x250
page_fault+0x22/0x30
-> #5 (&mm->mmap_sem){++++}:
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
__might_fault+0x68/0x90
_copy_to_user+0x23/0x70
filldir+0xa5/0x120
dcache_readdir+0xf9/0x170
iterate_dir+0x69/0x1a0
SyS_getdents+0xa5/0x140
entry_SYSCALL_64_fastpath+0x1c/0xb1
-> #4 (&sb->s_type->i_mutex_key#5){++++}:
down_write+0x3b/0x70
handle_create+0xcb/0x1e0
devtmpfsd+0x139/0x180
kthread+0x152/0x190
ret_from_fork+0x27/0x40
-> #3 ((complete)&req.done){+.+.}:
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
wait_for_common+0x58/0x210
wait_for_completion+0x1d/0x20
devtmpfs_create_node+0x13d/0x160
device_add+0x5eb/0x620
device_create_groups_vargs+0xe0/0xf0
device_create+0x3a/0x40
msr_device_create+0x2b/0x40
cpuhp_invoke_callback+0xc9/0xbf0
cpuhp_thread_fun+0x17b/0x240
smpboot_thread_fn+0x18a/0x280
kthread+0x152/0x190
ret_from_fork+0x27/0x40
-> #2 (cpuhp_state-up){+.+.}:
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
cpuhp_issue_call+0x133/0x1c0
__cpuhp_setup_state_cpuslocked+0x139/0x2a0
__cpuhp_setup_state+0x46/0x60
page_writeback_init+0x43/0x67
pagecache_init+0x3d/0x42
start_kernel+0x3a8/0x3fc
x86_64_start_reservations+0x2a/0x2c
x86_64_start_kernel+0x6d/0x70
verify_cpu+0x0/0xfb
-> #1 (cpuhp_state_mutex){+.+.}:
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
__mutex_lock+0x86/0x9b0
mutex_lock_nested+0x1b/0x20
__cpuhp_setup_state_cpuslocked+0x53/0x2a0
__cpuhp_setup_state+0x46/0x60
page_alloc_init+0x28/0x30
start_kernel+0x145/0x3fc
x86_64_start_reservations+0x2a/0x2c
x86_64_start_kernel+0x6d/0x70
verify_cpu+0x0/0xfb
-> #0 (cpu_hotplug_lock.rw_sem){++++}:
check_prev_add+0x430/0x840
__lock_acquire+0x1420/0x15e0
lock_acquire+0xb0/0x200
cpus_read_lock+0x3d/0xb0
stop_machine+0x1c/0x40
i915_gem_set_wedged+0x1a/0x20 [i915]
i915_reset+0xb9/0x230 [i915]
i915_reset_device+0x1f6/0x260 [i915]
i915_handle_error+0x2d8/0x430 [i915]
hangcheck_declare_hang+0xd3/0xf0 [i915]
i915_hangcheck_elapsed+0x262/0x2d0 [i915]
process_one_work+0x233/0x660
worker_thread+0x4e/0x3b0
kthread+0x152/0x190
ret_from_fork+0x27/0x40
other info that might help us debug this:
Chain exists of:
cpu_hotplug_lock.rw_sem --> &mm->mmap_sem --> &dev->struct_mutex
Possible unsafe locking scenario:
CPU0 CPU1
---- ----
lock(&dev->struct_mutex);
lock(&mm->mmap_sem);
lock(&dev->struct_mutex);
lock(cpu_hotplug_lock.rw_sem);
*** DEADLOCK ***
3 locks held by kworker/3:4/562:
#0: ("events_long"){+.+.}, at: [<ffffffff8109c64a>] process_one_work+0x1aa/0x660
#1: ((&(&i915->gpu_error.hangcheck_work)->work)){+.+.}, at: [<ffffffff8109c64a>] process_one_work+0x1aa/0x660
#2: (&dev->struct_mutex){+.+.}, at: [<ffffffffa0136588>] i915_reset_device+0x1e8/0x260 [i915]
stack backtrace:
CPU: 3 PID: 562 Comm: kworker/3:4 Tainted: G U 4.14.0-rc3-CI-CI_DRM_3179+ #1
Hardware name: /NUC7i5BNB, BIOS BNKBL357.86A.0048.2017.0704.1415 07/04/2017
Workqueue: events_long i915_hangcheck_elapsed [i915]
Call Trace:
dump_stack+0x68/0x9f
print_circular_bug+0x235/0x3c0
? lockdep_init_map_crosslock+0x20/0x20
check_prev_add+0x430/0x840
? irq_work_queue+0x86/0xe0
? wake_up_klogd+0x53/0x70
__lock_acquire+0x1420/0x15e0
? __lock_acquire+0x1420/0x15e0
? lockdep_init_map_crosslock+0x20/0x20
lock_acquire+0xb0/0x200
? stop_machine+0x1c/0x40
? i915_gem_object_truncate+0x50/0x50 [i915]
cpus_read_lock+0x3d/0xb0
? stop_machine+0x1c/0x40
stop_machine+0x1c/0x40
i915_gem_set_wedged+0x1a/0x20 [i915]
i915_reset+0xb9/0x230 [i915]
i915_reset_device+0x1f6/0x260 [i915]
? gen8_gt_irq_ack+0x170/0x170 [i915]
? work_on_cpu_safe+0x60/0x60
i915_handle_error+0x2d8/0x430 [i915]
? vsnprintf+0xd1/0x4b0
? scnprintf+0x3a/0x70
hangcheck_declare_hang+0xd3/0xf0 [i915]
? intel_runtime_pm_put+0x56/0xa0 [i915]
i915_hangcheck_elapsed+0x262/0x2d0 [i915]
process_one_work+0x233/0x660
worker_thread+0x4e/0x3b0
kthread+0x152/0x190
? process_one_work+0x660/0x660
? kthread_create_on_node+0x40/0x40
ret_from_fork+0x27/0x40
Setting dangerous option reset - tainting kernel
i915 0000:00:02.0: Resetting chip after gpu hang
Setting dangerous option reset - tainting kernel
i915 0000:00:02.0: Resetting chip after gpu hang
v2: Have 1 global synchronize_rcu() barrier across all engines, and
improve commit message.
v3: We need to protect the seqno update with the timeline spinlock (in
set_wedged) to avoid racing with other updates of the seqno, like we
already do in nop_submit_request (Chris).
v4: Use two-phase sequence to plug the race Chris spotted where we can
complete requests before they're marked up with -EIO.
v5: Review from Chris:
- simplify nop_submit_request.
- Add comment to rcu_read_lock section.
- Align comments with the new style.
v6: Remove unused variable to appease CI.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102886
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103096
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marta Lofstedt <marta.lofstedt@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171011091019.1425-1-daniel.vetter@ffwll.ch
2017-10-11 16:10:19 +07:00
|
|
|
rcu_read_unlock();
|
2017-02-23 14:44:18 +07:00
|
|
|
|
|
|
|
mutex_unlock(&i915->drm.struct_mutex);
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
|
2017-02-23 14:44:18 +07:00
|
|
|
pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
|
|
|
|
vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
if (i915_request_completed(request)) {
|
2017-02-23 14:44:18 +07:00
|
|
|
pr_err("low priority request already completed\n");
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = 0;
|
|
|
|
err:
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(vip);
|
2017-02-23 14:44:18 +07:00
|
|
|
mutex_lock(&i915->drm.struct_mutex);
|
|
|
|
err_context_1:
|
|
|
|
mock_context_close(ctx[1]);
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(request);
|
2017-02-23 14:44:18 +07:00
|
|
|
err_context_0:
|
|
|
|
mock_context_close(ctx[0]);
|
|
|
|
mock_device_flush(i915);
|
|
|
|
mutex_unlock(&i915->drm.struct_mutex);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
/*
 * i915_request_mock_selftests - run the request selftests against a mock
 * (hardware-free) GEM device.
 *
 * Returns 0 if all subtests pass, otherwise the first error encountered.
 */
int i915_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);
	/* Drop the final reference; tears down the mock device */
	drm_dev_unref(&i915->drm);

	return err;
}
|
2017-02-14 00:15:24 +07:00
|
|
|
|
|
|
|
/*
 * live_test - bookkeeping for a selftest run against real hardware.
 *
 * Captures state at begin_live_test() so that end_live_test() can check
 * nothing untoward (a GPU reset, a missed interrupt) happened meanwhile.
 */
struct live_test {
	struct drm_i915_private *i915;
	const char *func;	/* test function name, for error messages */
	const char *name;	/* sub-phase name (typically the engine) */

	unsigned int reset_count; /* GPU reset count sampled at test start */
};
|
|
|
|
|
|
|
|
/*
 * begin_live_test - prepare the device for a live selftest.
 *
 * Records the test identity in @t, waits for the GPU to idle so the test
 * starts from a clean state, clears the missed-interrupt tracking and
 * samples the current GPU reset count for later comparison by
 * end_live_test().  Caller holds struct_mutex (I915_WAIT_LOCKED).
 * Returns 0 on success or the error from idling the GPU.
 */
static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
	if (err) {
		pr_err("%s(%s): failed to idle before, with err=%d!",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_count = i915_reset_count(&i915->gpu_error);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * end_live_test - verify the device survived a live selftest.
 *
 * Retires completed requests, then checks that the engines go idle, that
 * no GPU resets occurred since begin_live_test(), and that no interrupts
 * were missed.  Returns 0 on success or -EIO on any failure.
 */
static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;

	i915_retire_requests(i915);

	/* Allow up to 10ms for the engines to settle into idle */
	if (wait_for(intel_engines_are_idle(i915), 10)) {
		pr_err("%s(%s): GPU not idle\n", t->func, t->name);
		return -EIO;
	}

	/* Any change in the reset count means a hang occurred mid-test */
	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_count);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * live_nop_request - measure the bare overhead of request submission.
 *
 * For each engine, submits batches of empty requests in increasing prime
 * sizes, waiting after each batch, and reports the amortised per-request
 * latency.  This checks that the minimal request machinery (ring HEAD
 * tracking, interrupts, breadcrumbs) functions on real hardware.
 * Returns 0 on success or a negative errno.
 */
static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err = -ENODEV;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct i915_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_request_alloc(engine,
							     i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_request_add(request);
			}
			/* Waiting on the last request waits for the batch */
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1]; /* single-request baseline */

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
|
|
|
|
|
2017-02-14 00:15:27 +07:00
|
|
|
/*
 * empty_batch - create a pinned batch buffer containing only
 * MI_BATCH_BUFFER_END.
 *
 * Allocates a page-sized internal object, writes the single terminating
 * command, flushes it to memory, and pins the object into the global GTT
 * so it can be executed with a privileged dispatch.  Returns the pinned
 * vma on success or an ERR_PTR on failure; on error the object is put.
 */
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd = MI_BATCH_BUFFER_END;
	/* Make the command visible to the GPU before execution */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
static struct i915_request *
|
2017-02-14 00:15:27 +07:00
|
|
|
empty_request(struct intel_engine_cs *engine,
|
|
|
|
struct i915_vma *batch)
|
|
|
|
{
|
2018-02-21 16:56:36 +07:00
|
|
|
struct i915_request *request;
|
2017-02-14 00:15:27 +07:00
|
|
|
int err;
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
request = i915_request_alloc(engine, engine->i915->kernel_context);
|
2017-02-14 00:15:27 +07:00
|
|
|
if (IS_ERR(request))
|
|
|
|
return request;
|
|
|
|
|
|
|
|
err = engine->emit_bb_start(request,
|
|
|
|
batch->node.start,
|
|
|
|
batch->node.size,
|
|
|
|
I915_DISPATCH_SECURE);
|
|
|
|
if (err)
|
|
|
|
goto out_request;
|
|
|
|
|
|
|
|
out_request:
|
2018-02-21 16:56:36 +07:00
|
|
|
__i915_request_add(request, err == 0);
|
2017-02-14 00:15:27 +07:00
|
|
|
return err ? ERR_PTR(err) : request;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Measure the round-trip latency of submitting empty batches to each
 * engine and waiting for them to complete, for increasing (prime-sized)
 * bursts, reporting the single-request and amortised per-request costs.
 */
static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct i915_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_request_wait(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			/*
			 * Only the last request is waited upon; earlier
			 * requests on the same engine/context are expected
			 * to complete first in submission order.
			 */
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1]; /* single-request baseline */

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
|
|
|
|
|
2017-02-14 00:15:25 +07:00
|
|
|
/*
 * Build a batch buffer that jumps back to its own start — an infinite
 * loop on the GPU — so a request can be held "busy" until the loop is
 * broken by recursive_batch_resolve() overwriting the first dword with
 * MI_BATCH_BUFFER_END. The MI_BATCH_BUFFER_START encoding varies by gen.
 *
 * Returns the pinned vma (in the kernel context's ppgtt if available,
 * otherwise the GGTT), or an ERR_PTR on failure.
 */
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	/*
	 * NOTE(review): failures after this point take the common err:
	 * path, which puts the object without unpinning the vma pinned
	 * above — verify this is handled (or harmless) on these paths.
	 */
	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	/* Gen-specific MI_BATCH_BUFFER_START: address width and flags differ */
	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else if (gen >= 4) {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
|
|
|
|
|
|
|
|
static int recursive_batch_resolve(struct i915_vma *batch)
|
|
|
|
{
|
|
|
|
u32 *cmd;
|
|
|
|
|
|
|
|
cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
|
|
|
|
if (IS_ERR(cmd))
|
|
|
|
return PTR_ERR(cmd);
|
|
|
|
|
|
|
|
*cmd = MI_BATCH_BUFFER_END;
|
2017-09-26 22:34:09 +07:00
|
|
|
i915_gem_chipset_flush(batch->vm->i915);
|
2017-02-14 00:15:25 +07:00
|
|
|
|
|
|
|
i915_gem_object_unpin_map(batch->obj);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int live_all_engines(void *arg)
|
|
|
|
{
|
|
|
|
struct drm_i915_private *i915 = arg;
|
|
|
|
struct intel_engine_cs *engine;
|
2018-02-21 16:56:36 +07:00
|
|
|
struct i915_request *request[I915_NUM_ENGINES];
|
2017-02-14 00:15:25 +07:00
|
|
|
struct i915_vma *batch;
|
|
|
|
struct live_test t;
|
|
|
|
unsigned int id;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/* Check we can submit requests to all engines simultaneously. We
|
|
|
|
* send a recursive batch to each engine - checking that we don't
|
|
|
|
* block doing so, and that they don't complete too soon.
|
|
|
|
*/
|
|
|
|
|
|
|
|
mutex_lock(&i915->drm.struct_mutex);
|
|
|
|
|
|
|
|
err = begin_live_test(&t, i915, __func__, "");
|
|
|
|
if (err)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
batch = recursive_batch(i915);
|
|
|
|
if (IS_ERR(batch)) {
|
|
|
|
err = PTR_ERR(batch);
|
|
|
|
pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
for_each_engine(engine, i915, id) {
|
2018-02-21 16:56:36 +07:00
|
|
|
request[id] = i915_request_alloc(engine, i915->kernel_context);
|
2017-02-14 00:15:25 +07:00
|
|
|
if (IS_ERR(request[id])) {
|
|
|
|
err = PTR_ERR(request[id]);
|
|
|
|
pr_err("%s: Request allocation failed with err=%d\n",
|
|
|
|
__func__, err);
|
|
|
|
goto out_request;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = engine->emit_bb_start(request[id],
|
|
|
|
batch->node.start,
|
|
|
|
batch->node.size,
|
|
|
|
0);
|
|
|
|
GEM_BUG_ON(err);
|
|
|
|
request[id]->batch = batch;
|
|
|
|
|
|
|
|
if (!i915_gem_object_has_active_reference(batch->obj)) {
|
|
|
|
i915_gem_object_get(batch->obj);
|
|
|
|
i915_gem_object_set_active_reference(batch->obj);
|
|
|
|
}
|
|
|
|
|
|
|
|
i915_vma_move_to_active(batch, request[id], 0);
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_get(request[id]);
|
|
|
|
i915_request_add(request[id]);
|
2017-02-14 00:15:25 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
for_each_engine(engine, i915, id) {
|
2018-02-21 16:56:36 +07:00
|
|
|
if (i915_request_completed(request[id])) {
|
2017-02-14 00:15:25 +07:00
|
|
|
pr_err("%s(%s): request completed too early!\n",
|
|
|
|
__func__, engine->name);
|
|
|
|
err = -EINVAL;
|
|
|
|
goto out_request;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
err = recursive_batch_resolve(batch);
|
|
|
|
if (err) {
|
|
|
|
pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
|
|
|
|
goto out_request;
|
|
|
|
}
|
|
|
|
|
|
|
|
for_each_engine(engine, i915, id) {
|
|
|
|
long timeout;
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
timeout = i915_request_wait(request[id],
|
2017-02-14 00:15:25 +07:00
|
|
|
I915_WAIT_LOCKED,
|
|
|
|
MAX_SCHEDULE_TIMEOUT);
|
|
|
|
if (timeout < 0) {
|
|
|
|
err = timeout;
|
|
|
|
pr_err("%s: error waiting for request on %s, err=%d\n",
|
|
|
|
__func__, engine->name, err);
|
|
|
|
goto out_request;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
GEM_BUG_ON(!i915_request_completed(request[id]));
|
|
|
|
i915_request_put(request[id]);
|
2017-02-14 00:15:25 +07:00
|
|
|
request[id] = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = end_live_test(&t);
|
|
|
|
|
|
|
|
out_request:
|
|
|
|
for_each_engine(engine, i915, id)
|
|
|
|
if (request[id])
|
2018-02-21 16:56:36 +07:00
|
|
|
i915_request_put(request[id]);
|
2017-02-14 00:15:25 +07:00
|
|
|
i915_vma_unpin(batch);
|
|
|
|
i915_vma_put(batch);
|
|
|
|
out_unlock:
|
|
|
|
mutex_unlock(&i915->drm.struct_mutex);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-02-14 00:15:26 +07:00
|
|
|
/*
 * Check we can submit requests to all engines sequentially, such that
 * each successive request waits (via a dma-fence await) for the earlier
 * ones. This tests that we don't execute requests out of order, even
 * though they are running on independent engines.
 */
static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request[I915_NUM_ENGINES] = {};
	struct i915_request *prev = NULL;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		/* One self-looping batch per engine, resolved later */
		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			/*
			 * NOTE(review): an ERR_PTR is left in request[id]
			 * here, but the out_request loop only checks for
			 * NULL before dereferencing request[id]->batch —
			 * and the freshly created batch above is leaked.
			 * Verify these error paths.
			 */
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		/* Chain this request behind the previous engine's request */
		if (prev) {
			err = i915_request_await_dma_fence(request[id],
							   &prev->fence);
			if (err) {
				/*
				 * NOTE(review): request[id]->batch is not
				 * yet assigned on this path; confirm the
				 * cleanup loop tolerates it.
				 */
				i915_request_add(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		i915_vma_move_to_active(batch, request[id], 0);
		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_request_get(request[id]);
		i915_request_add(request[id]);

		prev = request[id];
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		/* Still looping: completing now would mean out-of-order run */
		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

		/* Force-terminate any batch still looping before release */
		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			i915_gem_chipset_flush(i915);

			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_request_put(request[id]);
	}
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
|
|
|
|
|
2018-02-21 16:56:36 +07:00
|
|
|
/*
 * Entry point for the i915_request live selftests: registers and runs
 * each subtest against real hardware, in the order listed below.
 */
int i915_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};
	return i915_subtests(tests, i915);
}
|