/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"

#include "mock_engine.h"
#include "selftests/mock_request.h"

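/*
 * Software-only "mock" engine used by the i915 selftests. Requests are never
 * sent to real hardware: submission appends them to a per-engine hw_queue,
 * and a timer (or an immediate completion for zero-delay requests) stands in
 * for the hardware completion interrupt.
 */
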
struct mock_ring {
	struct intel_ring base;
	struct i915_timeline timeline;
};

static void mock_timeline_pin(struct i915_timeline *tl)
{
	tl->pin_count++;
}

static void mock_timeline_unpin(struct i915_timeline *tl)
{
	GEM_BUG_ON(!tl->pin_count);
	tl->pin_count--;
}

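/*
 * The mock ring and its contents are a single allocation: the ring storage
 * (sz bytes) lives immediately after struct mock_ring, which is why vaddr is
 * simply (void *)(ring + 1).
 */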
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
	const unsigned long sz = PAGE_SIZE / 2;
	struct mock_ring *ring;

	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
	if (!ring)
		return NULL;

	if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
		kfree(ring);
		return NULL;
	}

	kref_init(&ring->base.ref);
	ring->base.size = sz;
	ring->base.effective_size = sz;
	ring->base.vaddr = (void *)(ring + 1);
	ring->base.timeline = &ring->timeline;

	INIT_LIST_HEAD(&ring->base.request_list);
	intel_ring_update_space(&ring->base);

	return &ring->base;
}

static void mock_ring_free(struct intel_ring *base)
{
	struct mock_ring *ring = container_of(base, typeof(*ring), base);

	i915_timeline_fini(&ring->timeline);
	kfree(ring);
}

static struct i915_request *first_request(struct mock_engine *engine)
{
	return list_first_entry_or_null(&engine->hw_queue,
					struct i915_request,
					mock.link);
}

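/*
 * Complete a mock request: drop it from the software queue, mark it complete
 * and kick the breadcrumb signaler so that waiters are woken, just as a real
 * completion interrupt would.
 */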
static void advance(struct i915_request *request)
{
	list_del_init(&request->mock.link);
	i915_request_mark_complete(request);
	GEM_BUG_ON(!i915_request_completed(request));

	intel_engine_queue_breadcrumbs(request->engine);
}

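/*
 * Timer callback emulating the completion interrupt. Only the request at the
 * head of hw_queue ever has the timer armed; on expiry we complete it, flush
 * any zero-delay requests queued behind it, and re-arm the timer for the next
 * delayed request.
 */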
static void hw_delay_complete(struct timer_list *t)
{
	struct mock_engine *engine = from_timer(engine, t, hw_delay);
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->hw_lock, flags);

	/* Timer fired, first request is complete */
	request = first_request(engine);
	if (request)
		advance(request);

	/*
	 * Also immediately signal any subsequent 0-delay requests, but
	 * requeue the timer for the next delayed request.
	 */
	while ((request = first_request(engine))) {
		if (request->mock.delay) {
			mod_timer(&engine->hw_delay,
				  jiffies + request->mock.delay);
			break;
		}

		advance(request);
	}

	spin_unlock_irqrestore(&engine->hw_lock, flags);
}

static void mock_context_unpin(struct intel_context *ce)
{
	mock_timeline_unpin(ce->ring->timeline);
}

static void mock_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->ring)
		mock_ring_free(ce->ring);

	intel_context_free(ce);
}

static int mock_context_pin(struct intel_context *ce)
{
	if (!ce->ring) {
		ce->ring = mock_ring(ce->engine);
		if (!ce->ring)
			return -ENOMEM;
	}

	mock_timeline_pin(ce->ring->timeline);
	return 0;
}

static const struct intel_context_ops mock_context_ops = {
	.pin = mock_context_pin,
	.unpin = mock_context_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.destroy = mock_context_destroy,
};

static int mock_request_alloc(struct i915_request *request)
{
	INIT_LIST_HEAD(&request->mock.link);
	request->mock.delay = 0;

	return 0;
}

static int mock_emit_flush(struct i915_request *request,
			   unsigned int flags)
{
	return 0;
}

static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
{
	return cs;
}

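/*
 * Fake submission path: the request is submitted to the core via
 * i915_request_submit() and then queued on hw_queue under hw_lock. If it is
 * at the head of the queue, a non-zero mock delay arms the completion timer;
 * otherwise it is completed immediately.
 */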
static void mock_submit_request(struct i915_request *request)
{
	struct mock_engine *engine =
		container_of(request->engine, typeof(*engine), base);
	unsigned long flags;

	i915_request_submit(request);

	spin_lock_irqsave(&engine->hw_lock, flags);
	list_add_tail(&request->mock.link, &engine->hw_queue);
	if (list_is_first(&request->mock.link, &engine->hw_queue)) {
		if (request->mock.delay)
			mod_timer(&engine->hw_delay,
				  jiffies + request->mock.delay);
		else
			advance(request);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);
}

static void mock_reset_prepare(struct intel_engine_cs *engine)
{
}

static void mock_reset(struct intel_engine_cs *engine, bool stalled)
{
	GEM_BUG_ON(stalled);
}

static void mock_reset_finish(struct intel_engine_cs *engine)
{
}

static void mock_cancel_requests(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline.lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->timeline.requests, sched.link) {
		if (!i915_request_signaled(request))
			dma_fence_set_error(&request->fence, -EIO);

		i915_request_mark_complete(request);
	}

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

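/*
 * Create a mock engine with just enough state to build and "execute"
 * requests against it. A rough usage sketch (illustrative only, not lifted
 * from any single test -- see selftests/mock_request.c and the i915_request
 * selftests for the real helpers and exact call order):
 *
 *	engine = mock_engine(i915, "mock", 0);
 *	rq = mock_request(...);
 *	i915_request_add(rq);
 *	mock_engine_flush(engine);
 */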
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
				    const char *name,
				    int id)
{
	struct mock_engine *engine;
	int err;

	GEM_BUG_ON(id >= I915_NUM_ENGINES);

	engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
	if (!engine)
		return NULL;

	/* minimal engine setup for requests */
	engine->base.i915 = i915;
	snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
	engine->base.id = id;
	engine->base.mask = BIT(id);
	engine->base.status_page.addr = (void *)(engine + 1);

	engine->base.cops = &mock_context_ops;
	engine->base.request_alloc = mock_request_alloc;
	engine->base.emit_flush = mock_emit_flush;
	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
	engine->base.submit_request = mock_submit_request;

	engine->base.reset.prepare = mock_reset_prepare;
	engine->base.reset.reset = mock_reset;
	engine->base.reset.finish = mock_reset_finish;
	engine->base.cancel_requests = mock_cancel_requests;

	if (i915_timeline_init(i915, &engine->base.timeline, NULL))
		goto err_free;
	i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);

	intel_engine_init_breadcrumbs(&engine->base);
	intel_engine_init_execlists(&engine->base);
	intel_engine_init__pm(&engine->base);

	/* fake hw queue */
	spin_lock_init(&engine->hw_lock);
	timer_setup(&engine->hw_delay, hw_delay_complete, 0);
	INIT_LIST_HEAD(&engine->hw_queue);

	engine->base.kernel_context =
		intel_context_instance(i915->kernel_context, &engine->base);
	if (IS_ERR(engine->base.kernel_context))
		goto err_breadcrumbs;

	err = intel_context_pin(engine->base.kernel_context);
	intel_context_put(engine->base.kernel_context);
	if (err)
		goto err_breadcrumbs;

	return &engine->base;

err_breadcrumbs:
	intel_engine_fini_breadcrumbs(&engine->base);
	i915_timeline_fini(&engine->base.timeline);
err_free:
	kfree(engine);
	return NULL;
}

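/*
 * Synchronously cancel the fake completion timer and immediately complete
 * everything still sitting on the software queue.
 */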
void mock_engine_flush(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);
	struct i915_request *request, *rn;

	del_timer_sync(&mock->hw_delay);

	spin_lock_irq(&mock->hw_lock);
	list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
		advance(request);
	spin_unlock_irq(&mock->hw_lock);
}

void mock_engine_reset(struct intel_engine_cs *engine)
{
}

void mock_engine_free(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);
	struct intel_context *ce;

	GEM_BUG_ON(timer_pending(&mock->hw_delay));

	ce = fetch_and_zero(&engine->last_retired_context);
	if (ce)
		intel_context_unpin(ce);

	intel_context_unpin(engine->kernel_context);

	intel_engine_fini_breadcrumbs(engine);
	i915_timeline_fini(&engine->timeline);

	kfree(engine);
}