drm/i915: Move list of timelines under its own lock
Currently, the list of timelines is serialised by the struct_mutex, but to alleviate difficulties with using that mutex in future, move the list management under its own dedicated mutex.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190128102356.15037-5-chris@chris-wilson.co.uk
Parent: 0ca88ba0d6
Commit: 1e345568e3
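The core of the patch is the new struct i915_gt_timelines, which pairs the timeline list with a mutex of its own instead of relying on the driver-wide struct_mutex. For orientation, here is a minimal, self-contained userspace sketch of that pattern — pthreads stand in for the kernel mutex API, the list and all names are invented, and this is not the i915 code itself:

/*
 * Illustrative sketch only, not kernel code: a shared list that was
 * previously serialised by one big driver lock gets its own dedicated
 * mutex, mirroring the i915_gt_timelines { mutex, list } shape in the
 * diff below.
 */
#include <pthread.h>
#include <stdio.h>

struct timeline {
        int id;
        struct timeline *next;
};

/* The list carries its own lock, so walkers no longer need the big lock. */
static struct {
        pthread_mutex_t mutex;  /* protects head and the links */
        struct timeline *head;
} timelines = { PTHREAD_MUTEX_INITIALIZER, NULL };

static void timeline_add(struct timeline *tl)
{
        pthread_mutex_lock(&timelines.mutex);
        tl->next = timelines.head;
        timelines.head = tl;
        pthread_mutex_unlock(&timelines.mutex);
}

static void timelines_park(void)
{
        pthread_mutex_lock(&timelines.mutex);
        for (struct timeline *tl = timelines.head; tl; tl = tl->next)
                printf("parking timeline %d\n", tl->id);
        pthread_mutex_unlock(&timelines.mutex);
}

int main(void)
{
        struct timeline a = { .id = 1 }, b = { .id = 2 };

        timeline_add(&a);
        timeline_add(&b);
        timelines_park();       /* iterates under the dedicated lock only */
        return 0;
}

The diff below applies the same shape in the driver: the list moves into i915->gt.timelines.list, guarded by i915->gt.timelines.mutex, with init/fini helpers to manage the mutex lifetime.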
@@ -1975,7 +1975,10 @@ struct drm_i915_private {
 		void (*resume)(struct drm_i915_private *);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 
-		struct list_head timelines;
+		struct i915_gt_timelines {
+			struct mutex mutex; /* protects list, tainted by GPU */
+			struct list_head list;
+		} timelines;
 
 		struct list_head active_rings;
 		struct list_head closed_vma;
@@ -3222,33 +3222,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	return ret;
 }
 
-static long wait_for_timeline(struct i915_timeline *tl,
-			      unsigned int flags, long timeout)
-{
-	struct i915_request *rq;
-
-	rq = i915_gem_active_get_unlocked(&tl->last_request);
-	if (!rq)
-		return timeout;
-
-	/*
-	 * "Race-to-idle".
-	 *
-	 * Switching to the kernel context is often used a synchronous
-	 * step prior to idling, e.g. in suspend for flushing all
-	 * current operations to memory before sleeping. These we
-	 * want to complete as quickly as possible to avoid prolonged
-	 * stalls, so allow the gpu to boost to maximum clocks.
-	 */
-	if (flags & I915_WAIT_FOR_IDLE_BOOST)
-		gen6_rps_boost(rq, NULL);
-
-	timeout = i915_request_wait(rq, flags, timeout);
-	i915_request_put(rq);
-
-	return timeout;
-}
-
 static int wait_for_engines(struct drm_i915_private *i915)
 {
 	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
@@ -3262,6 +3235,52 @@ static int wait_for_engines(struct drm_i915_private *i915)
 	return 0;
 }
 
+static long
+wait_for_timelines(struct drm_i915_private *i915,
+		   unsigned int flags, long timeout)
+{
+	struct i915_gt_timelines *gt = &i915->gt.timelines;
+	struct i915_timeline *tl;
+
+	if (!READ_ONCE(i915->gt.active_requests))
+		return timeout;
+
+	mutex_lock(&gt->mutex);
+	list_for_each_entry(tl, &gt->list, link) {
+		struct i915_request *rq;
+
+		rq = i915_gem_active_get_unlocked(&tl->last_request);
+		if (!rq)
+			continue;
+
+		mutex_unlock(&gt->mutex);
+
+		/*
+		 * "Race-to-idle".
+		 *
+		 * Switching to the kernel context is often used a synchronous
+		 * step prior to idling, e.g. in suspend for flushing all
+		 * current operations to memory before sleeping. These we
+		 * want to complete as quickly as possible to avoid prolonged
+		 * stalls, so allow the gpu to boost to maximum clocks.
+		 */
+		if (flags & I915_WAIT_FOR_IDLE_BOOST)
+			gen6_rps_boost(rq, NULL);
+
+		timeout = i915_request_wait(rq, flags, timeout);
+		i915_request_put(rq);
+		if (timeout < 0)
+			return timeout;
+
+		/* restart after reacquiring the lock */
+		mutex_lock(&gt->mutex);
+		tl = list_entry(&gt->list, typeof(*tl), link);
+	}
+	mutex_unlock(&gt->mutex);
+
+	return timeout;
+}
+
 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 			   unsigned int flags, long timeout)
 {
@@ -3273,17 +3292,15 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 	if (!READ_ONCE(i915->gt.awake))
 		return 0;
 
+	timeout = wait_for_timelines(i915, flags, timeout);
+	if (timeout < 0)
+		return timeout;
+
 	if (flags & I915_WAIT_LOCKED) {
-		struct i915_timeline *tl;
 		int err;
 
 		lockdep_assert_held(&i915->drm.struct_mutex);
 
-		list_for_each_entry(tl, &i915->gt.timelines, link) {
-			timeout = wait_for_timeline(tl, flags, timeout);
-			if (timeout < 0)
-				return timeout;
-		}
 		if (GEM_SHOW_DEBUG() && !timeout) {
 			/* Presume that timeout was non-zero to begin with! */
 			dev_warn(&i915->drm.pdev->dev,
@@ -3297,17 +3314,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 
 		i915_retire_requests(i915);
 		GEM_BUG_ON(i915->gt.active_requests);
-	} else {
-		struct intel_engine_cs *engine;
-		enum intel_engine_id id;
-
-		for_each_engine(engine, i915, id) {
-			struct i915_timeline *tl = &engine->timeline;
-
-			timeout = wait_for_timeline(tl, flags, timeout);
-			if (timeout < 0)
-				return timeout;
-		}
 	}
 
 	return 0;
@@ -5008,6 +5014,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
 	}
 
+	i915_timelines_init(dev_priv);
+
 	ret = i915_gem_init_userptr(dev_priv);
 	if (ret)
 		return ret;
@@ -5130,8 +5138,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 err_uc_misc:
 	intel_uc_fini_misc(dev_priv);
 
-	if (ret != -EIO)
+	if (ret != -EIO) {
 		i915_gem_cleanup_userptr(dev_priv);
+		i915_timelines_fini(dev_priv);
+	}
 
 	if (ret == -EIO) {
 		mutex_lock(&dev_priv->drm.struct_mutex);
@@ -5182,6 +5192,7 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
 
 	intel_uc_fini_misc(dev_priv);
 	i915_gem_cleanup_userptr(dev_priv);
+	i915_timelines_fini(dev_priv);
 
 	i915_gem_drain_freed_objects(dev_priv);
 
@@ -5284,7 +5295,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 	if (!dev_priv->priorities)
 		goto err_dependencies;
 
-	INIT_LIST_HEAD(&dev_priv->gt.timelines);
 	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
 	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
 
@@ -5328,7 +5338,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
 	WARN_ON(dev_priv->mm.object_count);
-	WARN_ON(!list_empty(&dev_priv->gt.timelines));
 
 	kmem_cache_destroy(dev_priv->priorities);
 	kmem_cache_destroy(dev_priv->dependencies);
@@ -854,7 +854,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	 *
 	 * No more can be submitted until we reset the wedged bit.
 	 */
-	list_for_each_entry(tl, &i915->gt.timelines, link) {
+	mutex_lock(&i915->gt.timelines.mutex);
+	list_for_each_entry(tl, &i915->gt.timelines.list, link) {
 		struct i915_request *rq;
 		long timeout;
 
@@ -876,9 +877,12 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 		timeout = dma_fence_default_wait(&rq->fence, true,
 						 MAX_SCHEDULE_TIMEOUT);
 		i915_request_put(rq);
-		if (timeout < 0)
+		if (timeout < 0) {
+			mutex_unlock(&i915->gt.timelines.mutex);
 			goto unlock;
+		}
 	}
+	mutex_unlock(&i915->gt.timelines.mutex);
 
 	intel_engines_sanitize(i915, false);
 
@@ -13,7 +13,7 @@ void i915_timeline_init(struct drm_i915_private *i915,
 			struct i915_timeline *timeline,
 			const char *name)
 {
-	lockdep_assert_held(&i915->drm.struct_mutex);
+	struct i915_gt_timelines *gt = &i915->gt.timelines;
 
 	/*
 	 * Ideally we want a set of engines on a single leaf as we expect
@@ -23,9 +23,12 @@ void i915_timeline_init(struct drm_i915_private *i915,
 	 */
 	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
 
 	timeline->i915 = i915;
 	timeline->name = name;
 
-	list_add(&timeline->link, &i915->gt.timelines);
+	mutex_lock(&gt->mutex);
+	list_add(&timeline->link, &gt->list);
+	mutex_unlock(&gt->mutex);
 
 	/* Called during early_init before we know how many engines there are */
@@ -39,6 +42,17 @@ void i915_timeline_init(struct drm_i915_private *i915,
 	i915_syncmap_init(&timeline->sync);
 }
 
+void i915_timelines_init(struct drm_i915_private *i915)
+{
+	struct i915_gt_timelines *gt = &i915->gt.timelines;
+
+	mutex_init(&gt->mutex);
+	INIT_LIST_HEAD(&gt->list);
+
+	/* via i915_gem_wait_for_idle() */
+	i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
+}
+
 /**
  * i915_timelines_park - called when the driver idles
  * @i915: the drm_i915_private device
@@ -51,11 +65,11 @@ void i915_timeline_init(struct drm_i915_private *i915,
  */
 void i915_timelines_park(struct drm_i915_private *i915)
 {
+	struct i915_gt_timelines *gt = &i915->gt.timelines;
 	struct i915_timeline *timeline;
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	list_for_each_entry(timeline, &i915->gt.timelines, link) {
+	mutex_lock(&gt->mutex);
+	list_for_each_entry(timeline, &gt->list, link) {
 		/*
 		 * All known fences are completed so we can scrap
 		 * the current sync point tracking and start afresh,
@@ -64,15 +78,20 @@ void i915_timelines_park(struct drm_i915_private *i915)
 		 */
 		i915_syncmap_free(&timeline->sync);
 	}
+	mutex_unlock(&gt->mutex);
 }
 
 void i915_timeline_fini(struct i915_timeline *timeline)
 {
+	struct i915_gt_timelines *gt = &timeline->i915->gt.timelines;
+
 	GEM_BUG_ON(!list_empty(&timeline->requests));
 
 	i915_syncmap_free(&timeline->sync);
 
+	mutex_lock(&gt->mutex);
 	list_del(&timeline->link);
+	mutex_unlock(&gt->mutex);
 }
 
 struct i915_timeline *
@@ -99,6 +118,15 @@ void __i915_timeline_free(struct kref *kref)
 	kfree(timeline);
 }
 
+void i915_timelines_fini(struct drm_i915_private *i915)
+{
+	struct i915_gt_timelines *gt = &i915->gt.timelines;
+
+	GEM_BUG_ON(!list_empty(&gt->list));
+
+	mutex_destroy(&gt->mutex);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_timeline.c"
 #include "selftests/i915_timeline.c"
@@ -66,6 +66,7 @@ struct i915_timeline {
 
 	struct list_head link;
 	const char *name;
+	struct drm_i915_private *i915;
 
 	struct kref kref;
 };
@@ -134,6 +135,8 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
 	return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
 }
 
+void i915_timelines_init(struct drm_i915_private *i915);
 void i915_timelines_park(struct drm_i915_private *i915);
+void i915_timelines_fini(struct drm_i915_private *i915);
 
 #endif
@@ -68,13 +68,14 @@ static void mock_device_release(struct drm_device *dev)
 	i915_gem_contexts_fini(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
+	i915_timelines_fini(i915);
+
 	drain_workqueue(i915->wq);
 	i915_gem_drain_freed_objects(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_fini_ggtt(&i915->ggtt);
 	mutex_unlock(&i915->drm.struct_mutex);
-	WARN_ON(!list_empty(&i915->gt.timelines));
 
 	destroy_workqueue(i915->wq);
 
@@ -226,7 +227,8 @@ struct drm_i915_private *mock_gem_device(void)
 	if (!i915->priorities)
 		goto err_dependencies;
 
-	INIT_LIST_HEAD(&i915->gt.timelines);
+	i915_timelines_init(i915);
+
 	INIT_LIST_HEAD(&i915->gt.active_rings);
 	INIT_LIST_HEAD(&i915->gt.closed_vma);
 
@@ -253,6 +255,7 @@ struct drm_i915_private *mock_gem_device(void)
 	i915_gem_contexts_fini(i915);
 err_unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
+	i915_timelines_fini(i915);
 	kmem_cache_destroy(i915->priorities);
 err_dependencies:
 	kmem_cache_destroy(i915->dependencies);
@@ -10,6 +10,7 @@
 
 void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 {
+	timeline->i915 = NULL;
 	timeline->fence_context = context;
 
 	spin_lock_init(&timeline->lock);
@@ -24,5 +25,5 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 
 void mock_timeline_fini(struct i915_timeline *timeline)
 {
-	i915_timeline_fini(timeline);
+	i915_syncmap_free(&timeline->sync);
 }
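A note on the wait_for_timelines() hunk above: gt->mutex cannot be held across i915_request_wait(), so the loop takes a reference on the request, drops the lock for the blocking wait, then retakes the lock and restarts the walk from the head, since the list may have changed in the meantime. Below is a minimal userspace illustration of that drop-and-restart idiom — all names are invented, compile with -pthread, and unlike the kernel code (which pins each request with a reference) the nodes here are simply never freed:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct node {
        int busy;               /* stands in for an unfinished request */
        struct node *next;
};

static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void *finish_work(void *arg)
{
        /* Background thread: complete each "request" after a short delay. */
        for (struct node *n = arg; n; n = n->next) {
                usleep(10000);
                __atomic_store_n(&n->busy, 0, __ATOMIC_RELEASE);
        }
        return NULL;
}

static void wait_for_all(void)
{
        struct node *n;

        pthread_mutex_lock(&list_mutex);
restart:
        for (n = head; n; n = n->next) {
                if (!__atomic_load_n(&n->busy, __ATOMIC_ACQUIRE))
                        continue;

                /*
                 * Cannot sleep while holding list_mutex: drop it for the
                 * wait, retake it, and restart from the head because the
                 * list may have changed while it was unlocked.
                 */
                pthread_mutex_unlock(&list_mutex);
                while (__atomic_load_n(&n->busy, __ATOMIC_ACQUIRE))
                        usleep(1000);
                pthread_mutex_lock(&list_mutex);
                goto restart;
        }
        pthread_mutex_unlock(&list_mutex);
}

int main(void)
{
        struct node b = { .busy = 1 }, a = { .busy = 1, .next = &b };
        pthread_t worker;

        head = &a;
        pthread_create(&worker, NULL, finish_work, head);
        wait_for_all();
        pthread_join(worker, NULL);
        puts("all idle");
        return 0;
}

The kernel version avoids an unbounded number of restarts in practice because completed timelines are skipped on each pass; the same holds for the sketch, which only restarts after a node it actually waited on has gone idle.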