/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, ctx, engine);
	return ce;
}

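/*
 * Pin the context for use by the GPU, serialised by ce->pin_mutex. On the
 * first pin (pin_count was zero) we call into the backend under a temporary
 * runtime-pm wakeref and take a reference on the owning GEM context before
 * making the pin visible with an atomic_inc().
 */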
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (likely(!atomic_read(&ce->pin_count))) {
		intel_wakeref_t wakeref;

		err = 0;
		with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
			err = ce->ops->pin(ce);
		if (err)
			goto err;

		GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
			  ce->engine->name, ce->ring->timeline->fence_context,
			  ce->ring->head, ce->ring->tail);

		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */

		smp_mb__before_atomic(); /* flush pin before it is visible */
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return 0;

err:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

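/*
 * Drop one pin. On the final unpin we retake ce->pin_mutex (nested, as we
 * may be called from inside another pin attempt), call the backend unpin,
 * drop the GEM context reference and release the activity tracker.
 */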
void intel_context_unpin(struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	/* We may be called from inside intel_context_pin() to evict another */
	intel_context_get(ce);
	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

	if (likely(atomic_dec_and_test(&ce->pin_count))) {
		GEM_TRACE("%s context:%llx retire\n",
			  ce->engine->name, ce->ring->timeline->fence_context);

		ce->ops->unpin(ce);

		i915_gem_context_put(ce->gem_context);
		intel_context_active_release(ce);
	}

	mutex_unlock(&ce->pin_mutex);
	intel_context_put(ce);
}

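/*
 * Pin the context state (the logical context image) into the GGTT, at or
 * above the offset bias reported by i915_ggtt_pin_bias(), and keep the
 * backing object dirty and unshrinkable while it remains pinned.
 */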
static int __context_pin_state(struct i915_vma *vma)
{
	u64 flags;
	int err;

	flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	flags |= PIN_HIGH | PIN_GLOBAL;

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		return err;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	vma->obj->pin_global++;
	vma->obj->mm.dirty = true;

	return 0;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	vma->obj->pin_global--;
	__i915_vma_unpin(vma);
}

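/*
 * Retirement callback for ce->active: once the last request using this
 * context has been retired, undo __intel_context_active() by unpinning the
 * ring and state and dropping the reference taken on activation.
 */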
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	GEM_TRACE("%s context:%llx retire\n",
		  ce->engine->name, ce->ring->timeline->fence_context);

	if (ce->state)
		__context_unpin_state(ce->state);

	intel_ring_unpin(ce->ring);
	intel_context_put(ce);
}

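/*
 * Activation callback for ce->active: on the transition from idle to active,
 * take a reference on the context and pin its ring and state until the
 * matching retirement.
 */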
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	intel_context_get(ce);

	err = intel_ring_pin(ce->ring);
	if (err)
		goto err_put;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_ring;

	return 0;

err_ring:
	intel_ring_unpin(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

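/*
 * Mark the context as active. For user contexts, also preallocate the
 * barrier nodes that keep the context image pinned until after the next
 * switch to the kernel context, so that unpinning never has to allocate
 * (it may be called from inside FS_RECLAIM, i.e. the shrinker).
 */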
int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	/* Preallocate tracking nodes */
	if (!i915_gem_context_is_kernel(ce->gem_context)) {
		err = i915_active_acquire_preallocate_barrier(&ce->active,
							      ce->engine);
		if (err) {
			i915_active_release(&ce->active);
			return err;
		}
	}

	return 0;
}

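/*
 * Flush the preallocated barrier nodes onto their engines and drop the
 * reference taken in intel_context_active_acquire().
 */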
void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

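/*
 * One-time construction of an intel_context: bind it to its GEM context,
 * address space and engine, and initialise the pinning and activity
 * trackers. Paired with intel_context_fini().
 */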
void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);

	kref_init(&ce->ref);

	ce->gem_context = ctx;
	ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(ctx->i915, &ce->active,
			 __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

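/*
 * Slab cache for intel_context allocations, registered with the i915 globals
 * so it can be trimmed under memory pressure and destroyed on module unload.
 */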
static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

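/*
 * While a context is in use it holds a wakeref on its engine, and the engine
 * in turn holds the GT awake; these helpers take and release that per-engine
 * wakeref as the context enters and exits use.
 */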
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_engine_pm_put(ce->engine);
}

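/*
 * Prepare a request built on another context to remotely modify this one:
 * order it after this context's current activity on its timeline and keep
 * the context image and timeline pinned until the modifying request is
 * retired.
 */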
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->ring->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->hw_context == ce);

	if (rq->timeline != tl) { /* beware timeline sharing */
		err = mutex_lock_interruptible_nested(&tl->mutex,
						      SINGLE_DEPTH_NESTING);
		if (err)
			return err;

		/* Queue this switch after current activity by this context. */
		err = i915_active_request_set(&tl->last_request, rq);
		if (err)
			goto unlock;
	}
	lockdep_assert_held(&tl->mutex);

	/*
	 * By setting the ce activity tracker, we guarantee that the context
	 * image and the timeline remain pinned until the modifying request
	 * is retired.
	 *
	 * We only need to take a single pin on their account: in other
	 * words, we transfer our pinned ce to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	err = i915_active_ref(&ce->active, rq->fence.context, rq);

unlock:
	if (rq->timeline != tl)
		mutex_unlock(&tl->mutex);
	return err;
}

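/*
 * Convenience wrapper for emitting a request on a context: pin the context
 * just long enough to create the request on it, then drop the temporary pin
 * and return the request (or an ERR_PTR) to the caller.
 */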
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif