linux_dsm_epyc7002/drivers/gpu/drm/i915/i915_scheduler.c
Chris Wilson 7b1366b48c drm/i915: Reacquire priolist cache after dropping the engine lock
If we drop the engine lock, we may run execlists_dequeue, which may free
the priolist. Therefore, if we ever drop the execution lock on the
engine, we have to discard our cache and refetch the priolist to ensure
we do not use a stale pointer.

[  506.418935] [IGT] gem_exec_whisper: starting subtest contexts-priority
[  593.240825] general protection fault: 0000 [#1] SMP
[  593.240863] CPU: 1 PID: 494 Comm: gem_exec_whispe Tainted: G     U            5.0.0-rc6+ #100
[  593.240879] Hardware name:  /NUC6CAYB, BIOS AYAPLCEL.86A.0029.2016.1124.1625 11/24/2016
[  593.240965] RIP: 0010:__i915_schedule+0x1fe/0x320 [i915]
[  593.240981] Code: 48 8b 0c 24 48 89 c3 49 8b 45 28 49 8b 75 20 4c 89 3c 24 48 89 46 08 48 89 30 48 8b 43 08 48 89 4b 08 49 89 5d 20 49 89 45 28 <48> 89 08 45 39 a7 b8 03 00 00 7d 44 45 89 a7 b8 03 00 00 49 8b 85
[  593.240999] RSP: 0018:ffffc90000057a60 EFLAGS: 00010046
[  593.241013] RAX: 6b6b6b6b6b6b6b6b RBX: ffff8882582d7870 RCX: ffff88826baba6f0
[  593.241026] RDX: 0000000000000000 RSI: ffff8882582d6e70 RDI: ffff888273482194
[  593.241049] RBP: ffffc90000057a68 R08: ffff8882582d7680 R09: ffff8882582d7840
[  593.241068] R10: 0000000000000000 R11: ffffea00095ebe08 R12: 0000000000000728
[  593.241105] R13: ffff88826baba6d0 R14: ffffc90000057a40 R15: ffff888273482158
[  593.241120] FS:  00007f4613fb3900(0000) GS:ffff888277a80000(0000) knlGS:0000000000000000
[  593.241133] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  593.241146] CR2: 00007f57d3c66a84 CR3: 000000026e2b6000 CR4: 00000000001406e0
[  593.241158] Call Trace:
[  593.241233]  i915_schedule+0x1f/0x30 [i915]
[  593.241326]  i915_request_add+0x1a9/0x290 [i915]
[  593.241393]  i915_gem_do_execbuffer+0x45f/0x1150 [i915]
[  593.241411]  ? init_object+0x49/0x80
[  593.241425]  ? ___slab_alloc.constprop.91+0x4b8/0x4e0
[  593.241491]  ? i915_gem_execbuffer2_ioctl+0x99/0x380 [i915]
[  593.241563]  ? i915_gem_execbuffer_ioctl+0x270/0x270 [i915]
[  593.241629]  i915_gem_execbuffer2_ioctl+0x1bb/0x380 [i915]
[  593.241705]  ? i915_gem_execbuffer_ioctl+0x270/0x270 [i915]
[  593.241724]  drm_ioctl_kernel+0x81/0xd0
[  593.241738]  drm_ioctl+0x1a7/0x310
[  593.241803]  ? i915_gem_execbuffer_ioctl+0x270/0x270 [i915]
[  593.241819]  ? __update_load_avg_se+0x1c9/0x240
[  593.241834]  ? pick_next_entity+0x7e/0x120
[  593.241851]  do_vfs_ioctl+0x88/0x5d0
[  593.241880]  ksys_ioctl+0x35/0x70
[  593.241894]  __x64_sys_ioctl+0x11/0x20
[  593.241907]  do_syscall_64+0x44/0xf0
[  593.241924]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
[  593.241940] RIP: 0033:0x7f4615ffe757
[  593.241952] Code: 00 00 90 48 8b 05 39 a7 0c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 09 a7 0c 00 f7 d8 64 89 01 48
[  593.241970] RSP: 002b:00007ffc1030ddf8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[  593.241984] RAX: ffffffffffffffda RBX: 00007ffc10324420 RCX: 00007f4615ffe757
[  593.241997] RDX: 00007ffc1030e220 RSI: 0000000040406469 RDI: 0000000000000003
[  593.242010] RBP: 00007ffc1030e220 R08: 00007f46160c9208 R09: 00007f46160c9240
[  593.242022] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000040406469
[  593.242038] R13: 0000000000000003 R14: 0000000000000000 R15: 0000000000000000
[  593.242058] Modules linked in: i915 intel_gtt drm_kms_helper prime_numbers

v2: Track the local engine cache and explicitly clear it when switching
engine locks.
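
In outline, the fix (see sched_lock_engine() below) clears the caller's
priolist cache whenever the lock changes hands:

    if (engine != locked) {
        spin_unlock(&locked->timeline.lock);
        memset(cache, 0, sizeof(*cache)); /* priolist may now be freed */
        spin_lock(&engine->timeline.lock);
    }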

Fixes: a02eb975be ("drm/i915/execlists: Cache the priolist when rescheduling")
Testcase: igt/gem_exec_whisper/contexts-priority # rare!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190211204647.26723-1-chris@chris-wilson.co.uk
(cherry picked from commit ed7dc67774)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
2019-03-08 09:52:36 -08:00


/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*/
#include <linux/mutex.h>
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
static struct i915_global_scheduler {
struct i915_global base;
struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_priorities;
} global;
static DEFINE_SPINLOCK(schedule_lock);
static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
return container_of(node, const struct i915_request, sched);
}
static inline bool node_started(const struct i915_sched_node *node)
{
return i915_request_started(node_to_request(node));
}
static inline bool node_signaled(const struct i915_sched_node *node)
{
return i915_request_completed(node_to_request(node));
}
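/*
 * Initialise the scheduling bookkeeping embedded in every request:
 * empty signaler/waiter lists, not yet linked into any priolist, and
 * no priority assigned.
 */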
void i915_sched_node_init(struct i915_sched_node *node)
{
INIT_LIST_HEAD(&node->signalers_list);
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
node->attr.priority = I915_PRIORITY_INVALID;
node->flags = 0;
}
static struct i915_dependency *
i915_dependency_alloc(void)
{
return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}
static void
i915_dependency_free(struct i915_dependency *dep)
{
kmem_cache_free(global.slab_dependencies, dep);
}
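/*
 * Record that @node must wait for @signal. Returns true if the
 * dependency was tracked; false if @signal has already completed and
 * the preallocated @dep is not needed.
 */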
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal,
struct i915_dependency *dep,
unsigned long flags)
{
bool ret = false;
spin_lock(&schedule_lock);
if (!node_signaled(signal)) {
INIT_LIST_HEAD(&dep->dfs_link);
list_add(&dep->wait_link, &signal->waiters_list);
list_add(&dep->signal_link, &node->signalers_list);
dep->signaler = signal;
dep->flags = flags;
/* Keep track of whether anyone on this chain has a semaphore */
if (signal->flags & I915_SCHED_HAS_SEMAPHORE &&
!node_started(signal))
node->flags |= I915_SCHED_HAS_SEMAPHORE;
ret = true;
}
spin_unlock(&schedule_lock);
return ret;
}
int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal)
{
struct i915_dependency *dep;
dep = i915_dependency_alloc();
if (!dep)
return -ENOMEM;
if (!__i915_sched_node_add_dependency(node, signal, dep,
I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
return 0;
}
void i915_sched_node_fini(struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
GEM_BUG_ON(!list_empty(&node->link));
spin_lock(&schedule_lock);
/*
* Everyone we depended upon (the fences we wait to be signaled)
* should retire before us and remove themselves from our list.
* However, retirement is run independently on each timeline and
* so we may be called out-of-order.
*/
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
GEM_BUG_ON(!node_signaled(dep->signaler));
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
/* Remove ourselves from everyone who depends upon us */
list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
GEM_BUG_ON(dep->signaler != node);
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
spin_unlock(&schedule_lock);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
}
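/*
 * Debug-only check (CONFIG_DRM_I915_DEBUG_GEM): the priolist rbtree
 * must be sorted from highest to lowest priority, and each priolist's
 * used bitmask must cover every non-empty request bucket.
 */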
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
struct rb_node *rb;
long last_prio, i;
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
rb_first(&execlists->queue.rb_root));
last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
const struct i915_priolist *p = to_priolist(rb);
GEM_BUG_ON(p->priority >= last_prio);
last_prio = p->priority;
GEM_BUG_ON(!p->used);
for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
if (list_empty(&p->requests[i]))
continue;
GEM_BUG_ON(!(p->used & BIT(i)));
}
}
}
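/*
 * Look up (or create) the list of requests with the given priority on
 * @engine. Must be called with engine->timeline.lock held; the result
 * is only valid until that lock is dropped, as dequeuing may free the
 * priolist once it is emptied.
 */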
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_priolist *p;
struct rb_node **parent, *rb;
bool first = true;
int idx, i;
lockdep_assert_held(&engine->timeline.lock);
assert_priolists(execlists);
/* buckets sorted from highest [in slot 0] to lowest priority */
idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
prio >>= I915_USER_PRIORITY_SHIFT;
if (unlikely(execlists->no_priolist))
prio = I915_PRIORITY_NORMAL;
find_priolist:
/* most positive priority is scheduled first, equal priorities fifo */
rb = NULL;
parent = &execlists->queue.rb_root.rb_node;
while (*parent) {
rb = *parent;
p = to_priolist(rb);
if (prio > p->priority) {
parent = &rb->rb_left;
} else if (prio < p->priority) {
parent = &rb->rb_right;
first = false;
} else {
goto out;
}
}
if (prio == I915_PRIORITY_NORMAL) {
p = &execlists->default_priolist;
} else {
p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
/* Convert an allocation failure to a priority bump */
if (unlikely(!p)) {
prio = I915_PRIORITY_NORMAL; /* recurses just once */
/* To maintain ordering with all rendering, after an
* allocation failure we have to disable all scheduling.
* Requests will then be executed in fifo, and schedule
* will ensure that dependencies are emitted in fifo.
* There will be still some reordering with existing
* requests, so if userspace lied about their
* dependencies that reordering may be visible.
*/
execlists->no_priolist = true;
goto find_priolist;
}
}
p->priority = prio;
for (i = 0; i < ARRAY_SIZE(p->requests); i++)
INIT_LIST_HEAD(&p->requests[i]);
rb_link_node(&p->node, rb, parent);
rb_insert_color_cached(&p->node, &execlists->queue, first);
p->used = 0;
out:
p->used |= BIT(idx);
return &p->requests[idx];
}
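/*
 * Cache of the most recent priolist lookup made while holding a single
 * engine lock; it must be discarded as soon as that lock is dropped.
 */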
struct sched_cache {
struct list_head *priolist;
};
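/*
 * Take the timeline lock of the engine @node runs on, dropping the old
 * lock if it belongs to a different engine. Dropping the lock allows
 * execlists_dequeue to run and free the cached priolist, so the cache
 * is cleared whenever the lock changes hands.
 */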
static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
struct intel_engine_cs *locked,
struct sched_cache *cache)
{
struct intel_engine_cs *engine = node_to_request(node)->engine;
GEM_BUG_ON(!locked);
if (engine != locked) {
spin_unlock(&locked->timeline.lock);
memset(cache, 0, sizeof(*cache));
spin_lock(&engine->timeline.lock);
}
return engine;
}
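/* Does @rq belong to the context currently executing on @engine? */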
static bool inflight(const struct i915_request *rq,
const struct intel_engine_cs *engine)
{
const struct i915_request *active;
if (!i915_request_is_active(rq))
return false;
active = port_request(engine->execlists.port);
return active->hw_context == rq->hw_context;
}
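/*
 * Apply the new priority in @attr to @rq and propagate it to every
 * request that @rq depends upon, moving any ready request into the
 * priolist bucket for its new priority and kicking the execlists
 * tasklet if the change may affect what runs next.
 */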
static void __i915_schedule(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
const int prio = attr->priority;
struct sched_cache cache;
LIST_HEAD(dfs);
/* Needed in order to use the temporary link inside i915_dependency */
lockdep_assert_held(&schedule_lock);
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
if (i915_request_completed(rq))
return;
if (prio <= READ_ONCE(rq->sched.attr.priority))
return;
stack.signaler = &rq->sched;
list_add(&stack.dfs_link, &dfs);
/*
* Recursively bump all dependent priorities to match the new request.
*
* A naive approach would be to use recursion:
* static void update_priorities(struct i915_sched_node *node, prio) {
* list_for_each_entry(dep, &node->signalers_list, signal_link)
* update_priorities(dep->signal, prio)
* queue_request(node);
* }
* but that may have unlimited recursion depth and so runs a very
* real risk of overrunning the kernel stack. Instead, we build
* a flat list of all dependencies starting with the current request.
* As we walk the list of dependencies, we add all of its dependencies
* to the end of the list (this may include an already visited
* request) and continue to walk onwards onto the new dependencies. The
* end result is a topological list of requests in reverse order, the
* last element in the list is the request we must execute first.
*/
list_for_each_entry(dep, &dfs, dfs_link) {
struct i915_sched_node *node = dep->signaler;
/* If we are already flying, we know we have no signalers */
if (node_started(node))
continue;
/*
* Within an engine, there can be no cycle, but we may
* refer to the same dependency chain multiple times
* (redundant dependencies are not eliminated) and across
* engines.
*/
list_for_each_entry(p, &node->signalers_list, signal_link) {
GEM_BUG_ON(p == dep); /* no cycles! */
if (node_signaled(p->signaler))
continue;
if (prio > READ_ONCE(p->signaler->attr.priority))
list_move_tail(&p->dfs_link, &dfs);
}
}
/*
* If we didn't need to bump any existing priorities, and we haven't
* yet submitted this request (i.e. there is no potential race with
* execlists_submit_request()), we can set our own priority and skip
* acquiring the engine locks.
*/
if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
GEM_BUG_ON(!list_empty(&rq->sched.link));
rq->sched.attr = *attr;
if (stack.dfs_link.next == stack.dfs_link.prev)
return;
__list_del_entry(&stack.dfs_link);
}
memset(&cache, 0, sizeof(cache));
engine = rq->engine;
spin_lock_irq(&engine->timeline.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
struct i915_sched_node *node = dep->signaler;
INIT_LIST_HEAD(&dep->dfs_link);
engine = sched_lock_engine(node, engine, &cache);
lockdep_assert_held(&engine->timeline.lock);
/* Recheck after acquiring the engine->timeline.lock */
if (prio <= node->attr.priority || node_signaled(node))
continue;
node->attr.priority = prio;
if (!list_empty(&node->link)) {
if (!cache.priolist)
cache.priolist =
i915_sched_lookup_priolist(engine,
prio);
list_move_tail(&node->link, cache.priolist);
} else {
/*
* If the request is not in the priolist queue because
* it is not yet runnable, then it doesn't contribute
* to our preemption decisions. On the other hand,
* if the request is on the HW, it too is not in the
* queue; but in that case we may still need to reorder
* the inflight requests.
*/
if (!i915_sw_fence_done(&node_to_request(node)->submit))
continue;
}
if (prio <= engine->execlists.queue_priority_hint)
continue;
engine->execlists.queue_priority_hint = prio;
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
*/
if (inflight(node_to_request(node), engine))
continue;
/* Defer (tasklet) submission until after all of our updates. */
tasklet_hi_schedule(&engine->execlists.tasklet);
}
spin_unlock_irq(&engine->timeline.lock);
}
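/* Adjust the priority of @rq, and of the requests it depends upon. */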
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
spin_lock(&schedule_lock);
__i915_schedule(rq, attr);
spin_unlock(&schedule_lock);
}
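/*
 * Raise the priority of @rq by OR-ing @bump into its internal
 * (sub-user) priority bits, unless @rq has not yet been given a
 * priority at all.
 */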
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
struct i915_sched_attr attr;
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
return;
spin_lock_bh(&schedule_lock);
attr = rq->sched.attr;
attr.priority |= bump;
__i915_schedule(rq, &attr);
spin_unlock_bh(&schedule_lock);
}
void __i915_priolist_free(struct i915_priolist *p)
{
kmem_cache_free(global.slab_priorities, p);
}
static void i915_global_scheduler_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_priorities);
}
static void i915_global_scheduler_exit(void)
{
kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_priorities);
}
static struct i915_global_scheduler global = { {
.shrink = i915_global_scheduler_shrink,
.exit = i915_global_scheduler_exit,
} };
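/*
 * Create the slab caches used for dependency and priolist allocations
 * and register the shrink/exit hooks with the i915 globals machinery.
 */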
int __init i915_global_scheduler_init(void)
{
global.slab_dependencies = KMEM_CACHE(i915_dependency,
SLAB_HWCACHE_ALIGN);
if (!global.slab_dependencies)
return -ENOMEM;
global.slab_priorities = KMEM_CACHE(i915_priolist,
SLAB_HWCACHE_ALIGN);
if (!global.slab_priorities)
goto err_priorities;
i915_global_register(&global.base);
return 0;
err_priorities:
kmem_cache_destroy(global.slab_priorities);
return -ENOMEM;
}