/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)
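
/*
 * Example (illustrative only, not taken from the driver): CE_TRACE tags an
 * ENGINE_TRACE line with this context's fence context, so a call such as
 *
 *	CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
 *		 ce->ring->head, ce->ring->tail);
 *
 * would prefix the message with "context:<fence_context> ". The format
 * string above is only an assumed example.
 */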

void intel_context_init(struct intel_context *ce,
			struct i915_gem_context *ctx,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine);

void intel_context_free(struct intel_context *ce);
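
/*
 * Illustrative sketch of the creation/release flow (assumed call site, not
 * taken from the driver); 'ctx' and 'engine' stand in for a valid GEM
 * context and engine:
 *
 *	struct intel_context *ce;
 *
 *	ce = intel_context_create(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	... use ce ...
 *
 *	intel_context_put(ce);
 *
 * Note that intel_context_free() is normally reached via the final
 * intel_context_put(), not called directly.
 */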

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}
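
/*
 * Illustrative sketch (assumed call site, not taken from the driver) of
 * stabilising the pinned status before inspecting pinned-only state:
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *
 *	if (intel_context_is_pinned(ce))
 *		... safe to inspect state that exists only while pinned ...
 *
 *	intel_context_unlock_pinned(ce);
 */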

int __intel_context_do_pin(struct intel_context *ce);

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(atomic_inc_not_zero(&ce->pin_count)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);
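
/*
 * Illustrative sketch (assumed call site, not taken from the driver):
 * pinning the context for HW use and releasing it again. Each successful
 * intel_context_pin() must be balanced by an intel_context_unpin():
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	... submit work on ce ...
 *
 *	intel_context_unpin(ce);
 */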

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}
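
/*
 * Illustrative sketch (assumed call site, not taken from the driver): the
 * active_count is only manipulated while holding the context's timeline
 * mutex; the first intel_context_enter() invokes ce->ops->enter() and the
 * matching last intel_context_exit() invokes ce->ops->exit():
 *
 *	mutex_lock(&ce->timeline->mutex);
 *	intel_context_enter(ce);
 *	... build and emit requests on ce ...
 *	intel_context_exit(ce);
 *	mutex_unlock(&ce->timeline->mutex);
 */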

/*
 * Keep contexts pinned until after the next kernel context switch.
 *
 * We need to keep the context image pinned in memory until after the GPU
 * has finished writing into it. Since it continues to write as we signal
 * the final breadcrumb, we need to keep it pinned until the request after
 * it is complete. Currently we know the order in which requests execute on
 * each engine, so to remove that presumption we need to identify a
 * request/context-switch we know must occur after our completion. Any
 * request queued after the signal must imply a context switch; for
 * simplicity we use a fresh request from the kernel context.
 *
 * The sequence of operations for keeping the context pinned until saved is:
 *
 *  - On context activation, we preallocate a node for each physical engine
 *    the context may operate on. This is to avoid allocations during
 *    unpinning, which may be from inside FS_RECLAIM context (aka the
 *    shrinker).
 *
 *  - On context deactivation, on retirement of the last active request
 *    (which is before we know the context has been saved), we add the
 *    preallocated node onto a barrier list on each engine.
 *
 *  - On engine idling, we emit a switch to the kernel context. When this
 *    switch completes, we know that all previous contexts must have been
 *    saved, and so on retiring this request we can finally unpin all the
 *    contexts that were marked as deactivated prior to the switch.
 *
 * We can enhance this in future by flushing all the idle contexts on a
 * regular heartbeat pulse of a switch to kernel context, which will also
 * be used to check for hung engines.
 */
int intel_context_active_acquire(struct intel_context *ce);
void intel_context_active_release(struct intel_context *ce);

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}
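
/*
 * Illustrative sketch (assumed call site, not taken from the driver):
 * taking an extra reference while the context is handed to asynchronous
 * work, dropped once that work no longer needs it:
 *
 *	struct intel_context *ce = intel_context_get(other_ce);
 *
 *	... queue work that dereferences ce ...
 *
 *	intel_context_put(ce);
 *
 * 'other_ce' is a hypothetical pointer owned by the caller.
 */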

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}
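
/*
 * Illustrative sketch (assumed call site, not taken from the driver):
 * serialising against other users of the context's timeline:
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *
 *	... emit requests along the timeline ...
 *
 *	intel_context_timeline_unlock(tl);
 */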

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);
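
/*
 * Illustrative sketch (assumed call site, not taken from the driver):
 * building and submitting a request on a context; error handling is
 * abbreviated:
 *
 *	struct i915_request *rq;
 *
 *	rq = intel_context_create_request(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	... add payload to rq ...
 *
 *	i915_request_add(rq);
 */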

static inline struct intel_ring *__intel_context_ring_size(u64 sz)
{
	return u64_to_ptr(struct intel_ring, sz);
}
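
/*
 * Illustrative sketch (assumed call site, not taken from the driver):
 * before the context is pinned, ce->ring can carry the requested ring size
 * encoded as a fake pointer, to be replaced by the real ring at pin time:
 *
 *	ce->ring = __intel_context_ring_size(SZ_16K);
 */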

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}
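
/*
 * Illustrative sketch (assumed call sites, not taken from the driver) of
 * the flag helpers above. intel_context_set_banned() returns the previous
 * value of the bit, so a false return means this caller was the first to
 * ban the context:
 *
 *	if (!intel_context_set_banned(ce))
 *		CE_TRACE(ce, "banned\n");
 *
 *	if (intel_context_is_banned(ce))
 *		... refuse further submissions ...
 */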

#endif /* __INTEL_CONTEXT_H__ */