Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-23 06:23:17 +07:00)
drm/i915: Flush the freed object list on file close
As we increase the number of RCU objects, it becomes easier for us to have several hundred thousand objects in the deferred RCU free queues. An example is gem_ctx_create/files, which continually creates active contexts that are not immediately freed upon close, as they are kept alive by outstanding requests. This lack of backpressure allows the context objects to persist until they overwhelm and starve the system.

We can increase our backpressure by flushing the freed object queue upon closing the device fd, which should then not impact other clients.

Testcase: igt/gem_ctx_create/*files
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190802212137.22207-2-chris@chris-wilson.co.uk
This commit is contained in:
parent 1aff1903d0
commit 515b8b7e93
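The pattern the commit message describes is a lock-free deferred-free queue: object destructors push dead objects onto a singly linked list, and a flusher later detaches the whole list in one shot and frees everything on it. The sketch below is a minimal, self-contained userspace analogue of that shape in C11 atomics rather than the kernel's llist/RCU machinery; struct dead_object, defer_free() and flush_free_objects() are illustrative names, not i915 or kernel API, and the final flush stands in for the i915_gem_flush_free_objects() call the patch adds at file close.

/* Userspace sketch of a deferred-free queue drained in one shot.
 * Build with: cc -std=c11 -pthread sketch.c (names are illustrative). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dead_object {
	struct dead_object *next;
	int id;
};

/* Lock-free list of objects waiting to be freed (cf. i915->mm.free_list). */
static _Atomic(struct dead_object *) free_list;

/* Producer side: push a dead object onto the queue (cf. llist_add()). */
static void defer_free(struct dead_object *obj)
{
	struct dead_object *head = atomic_load(&free_list);

	do {
		obj->next = head;
	} while (!atomic_compare_exchange_weak(&free_list, &head, obj));
}

/* Consumer side: detach the whole queue at once and free every entry
 * (cf. llist_del_all() followed by __i915_gem_free_objects()). */
static void flush_free_objects(void)
{
	struct dead_object *freed = atomic_exchange(&free_list, NULL);

	while (freed) {
		struct dead_object *next = freed->next;

		printf("freeing object %d\n", freed->id);
		free(freed);
		freed = next;
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct dead_object *obj = malloc(sizeof(*obj));

		obj->id = i;
		defer_free(obj);
	}

	/* "File close": apply backpressure by draining the queue now. */
	flush_free_objects();
	return 0;
}

The point of the patch is where that last call happens: draining the queue in i915_driver_postclose() bounds how much deferred garbage a single client can leave behind, without making other clients wait.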
@@ -211,48 +211,18 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
-	struct llist_node *freed;
+	struct llist_node *freed = llist_del_all(&i915->mm.free_list);
 
-	/* Free the oldest, most stale object to keep the free_list short */
-	freed = NULL;
-	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
-		/* Only one consumer of llist_del_first() allowed */
-		spin_lock(&i915->mm.free_lock);
-		freed = llist_del_first(&i915->mm.free_list);
-		spin_unlock(&i915->mm.free_lock);
-	}
-	if (unlikely(freed)) {
-		freed->next = NULL;
+	if (unlikely(freed))
 		__i915_gem_free_objects(i915, freed);
-	}
 }
 
 static void __i915_gem_free_work(struct work_struct *work)
 {
 	struct drm_i915_private *i915 =
 		container_of(work, struct drm_i915_private, mm.free_work);
-	struct llist_node *freed;
 
-	/*
-	 * All file-owned VMA should have been released by this point through
-	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
-	 * However, the object may also be bound into the global GTT (e.g.
-	 * older GPUs without per-process support, or for direct access through
-	 * the GTT either for the user or for scanout). Those VMA still need to
-	 * unbound now.
-	 */
-
-	spin_lock(&i915->mm.free_lock);
-	while ((freed = llist_del_all(&i915->mm.free_list))) {
-		spin_unlock(&i915->mm.free_lock);
-
-		__i915_gem_free_objects(i915, freed);
-		if (need_resched())
-			return;
-
-		spin_lock(&i915->mm.free_lock);
-	}
-	spin_unlock(&i915->mm.free_lock);
+	i915_gem_flush_free_objects(i915);
 }
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
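A note on this hunk (and on the later hunks that delete mm.free_lock and its spin_lock_init()): the old code popped one node at a time with llist_del_first(), which, as its own comment says, allows only a single concurrent consumer, so callers had to be serialised by a spinlock. The new code uses llist_del_all(), which detaches the entire chain in a single atomic exchange and is safe for any number of concurrent consumers, so the lock can go away. Below is a rough userspace sketch of the two shapes using C11 atomics with illustrative names (del_first(), del_all()), not the kernel llist API.

/* Sketch: single-node pop vs. detach-all on a lock-free stack (C11). */
#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *next;
};

/*
 * Pop one node. Two concurrent callers can observe the same head and
 * race on what the new head should be (the classic ABA hazard), so
 * callers must be serialised externally -- the role mm.free_lock played
 * for llist_del_first() in the old code.
 */
static struct node *del_first(_Atomic(struct node *) *head)
{
	struct node *first = atomic_load(head);

	while (first &&
	       !atomic_compare_exchange_weak(head, &first, first->next))
		;
	return first;
}

/*
 * Detach the whole chain with one atomic exchange. Any number of callers
 * may do this concurrently; each receives a disjoint (possibly empty)
 * chain, so no lock is needed -- the shape llist_del_all() gives the
 * new i915_gem_flush_free_objects().
 */
static struct node *del_all(_Atomic(struct node *) *head)
{
	return atomic_exchange(head, NULL);
}

With both the free worker and the new file-close path reduced to "detach all, then free", there is no longer any single-consumer operation left on the list, which is why the later hunks can drop the lock entirely.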
@@ -2052,6 +2052,9 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 	mutex_unlock(&dev->struct_mutex);
 
 	kfree(file_priv);
+
+	/* Catch up with all the deferred frees from "this" client */
+	i915_gem_flush_free_objects(to_i915(dev));
 }
 
 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
@@ -773,7 +773,6 @@ struct i915_gem_mm {
 	 */
 	struct llist_head free_list;
 	struct work_struct free_work;
-	spinlock_t free_lock;
 	/**
 	 * Count of objects pending destructions. Used to skip needlessly
 	 * waiting on an RCU barrier if no objects are waiting to be freed.
@@ -1648,7 +1648,6 @@ void i915_gem_init_mmio(struct drm_i915_private *i915)
 static void i915_gem_init__mm(struct drm_i915_private *i915)
 {
 	spin_lock_init(&i915->mm.obj_lock);
-	spin_lock_init(&i915->mm.free_lock);
 
 	init_llist_head(&i915->mm.free_list);