/*
 * SPDX-License-Identifier: MIT
 *
 * (C) Copyright 2016 Intel Corporation
 */

#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/dma-resv.h>

#include "i915_sw_fence.h"
#include "i915_selftest.h"
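
/*
 * An i915_sw_fence is a struct-embeddable fence used to wait upon
 * multiple events (other i915_sw_fences, dma_fences, arbitrary
 * wait-queues) before running a notify callback. The fence starts with
 * a single pending count held by its initialiser; every await adds one
 * more, and the callback fires once all counts have been released.
 *
 * A minimal usage sketch (illustrative only, not a caller in this file):
 *
 *	i915_sw_fence_init(&fence, notify);
 *	i915_sw_fence_await_dma_fence(&fence, dma, 0, GFP_KERNEL);
 *	i915_sw_fence_commit(&fence);
 *
 * The commit drops the initialiser's count, so notify(FENCE_COMPLETE)
 * runs as soon as the awaited dma_fence has signaled.
 */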

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
#else
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */

static DEFINE_SPINLOCK(i915_sw_fence_lock);

enum {
	DEBUG_FENCE_IDLE = 0,
	DEBUG_FENCE_NOTIFY,
};

static void *i915_sw_fence_debug_hint(void *addr)
{
	return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
}
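
/*
 * Wrappers around the debugobjects API used to track the lifetime of
 * each fence (init -> activate -> notify -> free) when
 * CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is enabled; otherwise they
 * compile away to no-ops.
 */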

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS

static struct debug_obj_descr i915_sw_fence_debug_descr = {
	.name = "i915_sw_fence",
	.debug_hint = i915_sw_fence_debug_hint,
};

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
	debug_object_init(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
	debug_object_activate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
	debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new);
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
	debug_object_deactivate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
	debug_object_free(fence, &i915_sw_fence_debug_descr);
	smp_wmb(); /* flush the change in state before reallocation */
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
	debug_object_assert_init(fence, &i915_sw_fence_debug_descr);
}

#else

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
}

#endif
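
/*
 * The notify callback is stored directly in fence->flags: its low
 * alignment bits (below I915_SW_FENCE_MASK) are reused for flag bits
 * such as I915_SW_FENCE_CHECKED_BIT, so the pointer must be recovered
 * by masking before it can be called.
 */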

static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
				  enum i915_sw_fence_notify state)
{
	i915_sw_fence_notify_t fn;

	fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
	return fn(fence, state);
}

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
void i915_sw_fence_fini(struct i915_sw_fence *fence)
{
	debug_fence_free(fence);
}
#endif
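
/*
 * Mark the fence as signaled (pending 0 -> -1, after which
 * i915_sw_fence_done() reports it complete) and wake every entry on its
 * wait-queue, flattening chained i915_sw_fences onto one list instead
 * of recursing through the graph.
 */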

static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
					struct list_head *continuation)
{
	wait_queue_head_t *x = &fence->wait;
	wait_queue_entry_t *pos, *next;
	unsigned long flags;

	debug_fence_deactivate(fence);
	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */

	/*
	 * To prevent unbounded recursion as we traverse the graph of
	 * i915_sw_fences, we move the entry list from this, the next ready
	 * fence, to the tail of the original fence's entry list
	 * (and so added to the list to be woken).
	 */
	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
	if (continuation) {
		list_for_each_entry_safe(pos, next, &x->head, entry) {
			if (pos->func == autoremove_wake_function)
				pos->func(pos, TASK_NORMAL, 0, continuation);
			else
				list_move_tail(&pos->entry, continuation);
		}
	} else {
		LIST_HEAD(extra);

		do {
			list_for_each_entry_safe(pos, next, &x->head, entry) {
				pos->func(pos,
					  TASK_NORMAL, fence->error,
					  &extra);
			}

			if (list_empty(&extra))
				break;

			list_splice_tail_init(&extra, &x->head);
		} while (1);
	}
	spin_unlock_irqrestore(&x->lock, flags);

	debug_fence_assert(fence);
}
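
/*
 * Release one pending count. On the final release the FENCE_COMPLETE
 * notification is sent and, if the callback returns NOTIFY_DONE, all
 * waiters are woken and the fence receives a last FENCE_FREE
 * notification.
 */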

static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
				     struct list_head *continuation)
{
	debug_fence_assert(fence);

	if (!atomic_dec_and_test(&fence->pending))
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY);

	if (__i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE);

	__i915_sw_fence_wake_up_all(fence, continuation);

	debug_fence_destroy(fence);
	__i915_sw_fence_notify(fence, FENCE_FREE);
}

void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);

	if (WARN_ON(i915_sw_fence_done(fence)))
		return;

	__i915_sw_fence_complete(fence, NULL);
}

bool i915_sw_fence_await(struct i915_sw_fence *fence)
{
	int pending;

	/*
	 * It is only safe to add a new await to the fence while it has
	 * not yet been signaled (i.e. there are still existing signalers).
	 */
	pending = atomic_read(&fence->pending);
	do {
		if (pending < 1)
			return false;
	} while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1));

	return true;
}
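
/*
 * Initialise the fence with a single pending count, held by the caller
 * until i915_sw_fence_commit(). The notify callback must be aligned
 * such that its pointer fits within I915_SW_FENCE_MASK, leaving the
 * low bits of fence->flags free for internal use.
 */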

void __i915_sw_fence_init(struct i915_sw_fence *fence,
			  i915_sw_fence_notify_t fn,
			  const char *name,
			  struct lock_class_key *key)
{
	BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);

	__init_waitqueue_head(&fence->wait, name, key);
	fence->flags = (unsigned long)fn;

	i915_sw_fence_reinit(fence);
}

void i915_sw_fence_reinit(struct i915_sw_fence *fence)
{
	debug_fence_init(fence);

	atomic_set(&fence->pending, 1);
	fence->error = 0;

	I915_SW_FENCE_BUG_ON(!fence->flags);
	I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}

void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
	debug_fence_activate(fence);
	i915_sw_fence_complete(fence);
}
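
/*
 * Wait-queue callback invoked when a signaler completes: propagate the
 * signaler's error (passed via @flags), unhook from its wait-queue and
 * release one pending count on the waiting fence.
 */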

static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
	i915_sw_fence_set_error_once(wq->private, flags);

	list_del(&wq->entry);
	__i915_sw_fence_complete(wq->private, key);

	if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
		kfree(wq);
	return 0;
}

static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					   const struct i915_sw_fence * const signaler)
{
	wait_queue_entry_t *wq;

	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return false;

	if (fence == signaler)
		return true;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		if (__i915_sw_fence_check_if_after(wq->private, signaler))
			return true;
	}

	return false;
}

static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
{
	wait_queue_entry_t *wq;

	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		__i915_sw_fence_clear_checked_bit(wq->private);
	}
}

static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					 const struct i915_sw_fence * const signaler)
{
	unsigned long flags;
	bool err;

	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return false;

	spin_lock_irqsave(&i915_sw_fence_lock, flags);
	err = __i915_sw_fence_check_if_after(fence, signaler);
	__i915_sw_fence_clear_checked_bit(fence);
	spin_unlock_irqrestore(&i915_sw_fence_lock, flags);

	return err;
}
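
/*
 * Chain @fence to wait upon @signaler. Returns 0 if the signaler is
 * already complete, 1 if the await was queued, -EINVAL if it would
 * create a cycle, or -ENOMEM if no wait-queue entry could be allocated
 * for a non-blocking caller.
 */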

static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
					  struct i915_sw_fence *signaler,
					  wait_queue_entry_t *wq, gfp_t gfp)
{
	unsigned long flags;
	int pending;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (i915_sw_fence_done(signaler)) {
		i915_sw_fence_set_error_once(fence, signaler->error);
		return 0;
	}

	debug_fence_assert(signaler);

	/* The dependency graph must be acyclic. */
	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
		return -EINVAL;

	pending = 0;
	if (!wq) {
		wq = kmalloc(sizeof(*wq), gfp);
		if (!wq) {
			if (!gfpflags_allow_blocking(gfp))
				return -ENOMEM;

			i915_sw_fence_wait(signaler);
			i915_sw_fence_set_error_once(fence, signaler->error);
			return 0;
		}

		pending |= I915_SW_FENCE_FLAG_ALLOC;
	}

	INIT_LIST_HEAD(&wq->entry);
	wq->flags = pending;
	wq->func = i915_sw_fence_wake;
	wq->private = fence;

	i915_sw_fence_await(fence);

	spin_lock_irqsave(&signaler->wait.lock, flags);
	if (likely(!i915_sw_fence_done(signaler))) {
		__add_wait_queue_entry_tail(&signaler->wait, wq);
		pending = 1;
	} else {
		i915_sw_fence_wake(wq, 0, signaler->error, NULL);
		pending = 0;
	}
	spin_unlock_irqrestore(&signaler->wait.lock, flags);

	return pending;
}

int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
				 struct i915_sw_fence *signaler,
				 wait_queue_entry_t *wq)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
}

int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
				     struct i915_sw_fence *signaler,
				     gfp_t gfp)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}

struct i915_sw_dma_fence_cb_timer {
	struct i915_sw_dma_fence_cb base;
	struct dma_fence *dma;
	struct timer_list timer;
	struct irq_work work;
	struct rcu_head rcu;
};
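
/*
 * Callback state for awaiting a dma_fence: the plain
 * i915_sw_dma_fence_cb suffices for untimed waits, while the _timer
 * variant above adds a timeout timer plus the irq_work and RCU pieces
 * needed to tear that timer down safely from the signaling context.
 */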

static void dma_i915_sw_fence_wake(struct dma_fence *dma,
				   struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_set_error_once(cb->fence, dma->error);
	i915_sw_fence_complete(cb->fence);
	kfree(cb);
}

static void timer_i915_sw_fence_wake(struct timer_list *t)
{
	struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);
	struct i915_sw_fence *fence;

	fence = xchg(&cb->base.fence, NULL);
	if (!fence)
		return;

	pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n",
		  cb->dma->ops->get_driver_name(cb->dma),
		  cb->dma->ops->get_timeline_name(cb->dma),
		  cb->dma->seqno,
		  i915_sw_fence_debug_hint(fence));

	i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
	i915_sw_fence_complete(fence);
}

static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
					 struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(data, typeof(*cb), base.base);
	struct i915_sw_fence *fence;

	fence = xchg(&cb->base.fence, NULL);
	if (fence) {
		i915_sw_fence_set_error_once(fence, dma->error);
		i915_sw_fence_complete(fence);
	}

	irq_work_queue(&cb->work);
}

static void irq_i915_sw_fence_work(struct irq_work *wrk)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(wrk, typeof(*cb), work);

	del_timer_sync(&cb->timer);
	dma_fence_put(cb->dma);

	kfree_rcu(cb, rcu);
}
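
/*
 * Suspend @fence until @dma is signaled, or until @timeout jiffies
 * elapse (0 means wait indefinitely; on expiry the fence is completed
 * with -ETIMEDOUT). Returns 0 if @dma was already signaled, 1 if a
 * callback was installed, or a negative error code.
 */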

int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				  struct dma_fence *dma,
				  unsigned long timeout,
				  gfp_t gfp)
{
	struct i915_sw_dma_fence_cb *cb;
	dma_fence_func_t func;
	int ret;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb = kmalloc(timeout ?
		     sizeof(struct i915_sw_dma_fence_cb_timer) :
		     sizeof(struct i915_sw_dma_fence_cb),
		     gfp);
	if (!cb) {
		if (!gfpflags_allow_blocking(gfp))
			return -ENOMEM;

		ret = dma_fence_wait(dma, false);
		if (ret)
			return ret;

		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	func = dma_i915_sw_fence_wake;
	if (timeout) {
		struct i915_sw_dma_fence_cb_timer *timer =
			container_of(cb, typeof(*timer), base);

		timer->dma = dma_fence_get(dma);
		init_irq_work(&timer->work, irq_i915_sw_fence_work);

		timer_setup(&timer->timer,
			    timer_i915_sw_fence_wake, TIMER_IRQSAFE);
		mod_timer(&timer->timer, round_jiffies_up(jiffies + timeout));

		func = dma_i915_sw_fence_wake_timer;
	}

	ret = dma_fence_add_callback(dma, &cb->base, func);
	if (ret == 0) {
		ret = 1;
	} else {
		func(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}

static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
				     struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_set_error_once(cb->fence, dma->error);
	i915_sw_fence_complete(cb->fence);
}

int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				    struct dma_fence *dma,
				    struct i915_sw_dma_fence_cb *cb)
{
	int ret;

	debug_fence_assert(fence);

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
	if (ret == 0) {
		ret = 1;
	} else {
		__dma_i915_sw_fence_wake(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}
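
/*
 * Await the fences tracked in a dma_resv object: a write must wait
 * upon all shared fences as well as the exclusive fence, whereas a
 * read needs only the exclusive fence. Fences whose ops match @exclude
 * (e.g. the caller's own) are skipped.
 */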

int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
				    struct dma_resv *resv,
				    const struct dma_fence_ops *exclude,
				    bool write,
				    unsigned long timeout,
				    gfp_t gfp)
{
	struct dma_fence *excl;
	int ret = 0, pending;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			if (shared[i]->ops == exclude)
				continue;

			pending = i915_sw_fence_await_dma_fence(fence,
								shared[i],
								timeout,
								gfp);
			if (pending < 0) {
				ret = pending;
				break;
			}

			ret |= pending;
		}

		for (i = 0; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = dma_resv_get_excl_rcu(resv);
	}

	if (ret >= 0 && excl && excl->ops != exclude) {
		pending = i915_sw_fence_await_dma_fence(fence,
							excl,
							timeout,
							gfp);
		if (pending < 0)
			ret = pending;
		else
			ret |= pending;
	}

	dma_fence_put(excl);

	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/lib_sw_fence.c"
#include "selftests/i915_sw_fence.c"
#endif