/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/lockdep.h>

#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"

void intel_context_init(struct intel_context *ce,
			struct i915_gem_context *ctx,
			struct intel_engine_cs *engine);

struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine);

void intel_context_free(struct intel_context *ce);
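
/*
 * Illustrative sketch (not part of this header): a typical create/release
 * cycle for a context. It assumes intel_context_create() returns an
 * ERR_PTR() on failure and that the reference it hands back is normally
 * dropped with intel_context_put() rather than by calling
 * intel_context_free() directly.
 *
 *	ce = intel_context_create(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	... pin and submit work on ce ...
 *
 *	intel_context_put(ce);
 */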

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}
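
/*
 * Illustrative sketch: stabilising the pinned state before peeking at the
 * HW image. The ce->state access is a stand-in for whatever the caller
 * wants to inspect while the context cannot be bound or unbound underneath
 * it.
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *
 *	if (intel_context_is_pinned(ce))
 *		... safely inspect ce->state, ce->ring, etc. ...
 *
 *	intel_context_unlock_pinned(ce);
 */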

int __intel_context_do_pin(struct intel_context *ce);

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(atomic_inc_not_zero(&ce->pin_count)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);
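
/*
 * Illustrative sketch: pin/unpin bracket any use of the context's GTT
 * state. intel_context_pin() is the normal entry point (fast path for an
 * already-pinned context, slow path otherwise); __intel_context_pin() may
 * only be used to take an extra pin on a context already known to be
 * pinned.
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	... build and submit requests against ce ...
 *
 *	intel_context_unpin(ce);
 */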

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}
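
/*
 * Illustrative sketch: callers are expected to keep intel_context_enter()
 * and intel_context_exit() balanced, one pair per request, under whatever
 * lock serialises ce->active_count (an assumption about the caller; these
 * helpers do no locking themselves). ce->ops->enter() then fires when the
 * context first becomes active and ce->ops->exit() when its last request
 * is retired.
 *
 *	intel_context_enter(ce);
 *	... submit the request ...
 *
 *	... later, when that request is retired ...
 *	intel_context_exit(ce);
 */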

static inline int intel_context_active_acquire(struct intel_context *ce)
{
	return i915_active_acquire(&ce->active);
}

static inline void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

/*
 * From "drm/i915: Keep contexts pinned until after the next kernel context
 * switch":
 *
 * We need to keep the context image pinned in memory until after the GPU
 * has finished writing into it. Since it continues to write as we signal
 * the final breadcrumb, we need to keep it pinned until the request that
 * follows it has completed. Currently we rely on knowing the order in
 * which requests execute on each engine; to remove that presumption we
 * need to identify a request/context-switch that we know must occur after
 * our completion. Any request queued after the signal must imply a context
 * switch; for simplicity we use a fresh request from the kernel context.
 *
 * The sequence of operations for keeping the context pinned until saved is:
 *
 *  - On context activation, we preallocate a node for each physical engine
 *    the context may operate on. This avoids allocations during unpinning,
 *    which may be called from inside FS_RECLAIM context (aka the shrinker).
 *
 *  - On context deactivation, i.e. on retirement of the last active request
 *    (which is before we know the context has been saved), we add the
 *    preallocated node onto a barrier list on each engine.
 *
 *  - On engine idling, we emit a switch to the kernel context. When this
 *    switch completes, we know that all previous contexts must have been
 *    saved, and so on retiring this request we can finally unpin all the
 *    contexts that were marked as deactivated prior to the switch.
 *
 * This can be enhanced in future by flushing all the idle contexts on a
 * regular heartbeat pulse of a switch to the kernel context, which will
 * also be used to check for hung engines.
 */
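
/*
 * Illustrative sketch (a plausible caller pattern, not taken from this
 * header; the real call sites live in the pin/unpin paths in
 * intel_context.c): the acquire/release pair brackets the window in which
 * the preallocated barrier nodes described above are set up and later
 * queued.
 *
 *	err = intel_context_active_acquire(ce);
 *	if (err)
 *		return err;
 *
 *	... bind the context state, ring and page tables ...
 *
 *	intel_context_active_release(ce);
 */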

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}
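
/*
 * Illustrative sketch: hold a reference for as long as the context pointer
 * may be dereferenced outside whatever lock made the lookup safe, e.g.
 * when handing it to asynchronous work. The worker and wq names below are
 * hypothetical stand-ins for the caller's own structures.
 *
 *	worker->ce = intel_context_get(ce);
 *	queue_work(wq, &worker->work);
 *
 *	... and in the worker, once finished with the context ...
 *	intel_context_put(worker->ce);
 */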

static inline int __must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->ring->timeline->mutex)
{
	return mutex_lock_interruptible(&ce->ring->timeline->mutex);
}

static inline void intel_context_timeline_unlock(struct intel_context *ce)
	__releases(&ce->ring->timeline->mutex)
{
	mutex_unlock(&ce->ring->timeline->mutex);
}
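
/*
 * Illustrative sketch: the timeline mutex serialises request construction
 * and emission on this context's ring, so anything that must appear
 * back-to-back in submission order is built under the lock. The request
 * construction step is elided here, as it lives in i915_request.c rather
 * than this header.
 *
 *	err = intel_context_timeline_lock(ce);
 *	if (err)
 *		return err;
 *
 *	... construct and emit request(s) onto ce->ring->timeline ...
 *
 *	intel_context_timeline_unlock(ce);
 */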

int intel_context_prepare_remote_request(struct intel_context *ce,
					  struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);
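
/*
 * Illustrative sketch: intel_context_create_request() hands back a request
 * on ce or an ERR_PTR(). The i915_request_add() pairing shown is an
 * assumption about the usual caller, taken from i915_request.h rather than
 * this header.
 *
 *	rq = intel_context_create_request(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	... fill in the request payload ...
 *
 *	i915_request_add(rq);
 */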

#endif /* __INTEL_CONTEXT_H__ */