drm/i915/gem: Only revoke mmap handlers if active

Avoid waking up the device and taking stale locks if we know that the
object is not currently mmapped. This is particularly useful as not many
objects are actually mmapped and so we can destroy them without waking
the device up, and gives us a little more freedom of workqueue ordering
during shutdown.

v2: Pull the release_mmap() into its single user in freeing the objects,
where there cannot be any race with a concurrent user of the freed
object. Or so one hopes!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200702163623.6402-2-chris@chris-wilson.co.uk
commit db8337853b
parent cb2baf42dc
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2020-07-02 17:36:23 +01:00

3 changed files with 24 additions and 28 deletions
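
The freed-object path below boils down to a check-before-revoke pattern. As a condensed sketch of the new helper (names taken from the diff; the full version, including the teardown of the mmap-offset rbtree, is in the i915_gem_object.c hunk further down):

static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
	/* A GTT mmap was faulted in; revoking it may need to wake the device. */
	if (obj->userfault_count)
		i915_gem_object_release_mmap_gtt(obj);

	/* Only walk the offset tree if an mmap offset was ever created. */
	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
		i915_gem_object_release_mmap_offset(obj);
		/* ...then free each i915_mmap_offset node, see the full hunk. */
	}
}

For an object that was never mmapped, both tests are false, so it can be destroyed without waking the device, which is what buys the extra freedom in workqueue ordering during shutdown mentioned above.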

drivers/gpu/drm/i915/gem/i915_gem_mman.c

@@ -507,19 +507,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 	spin_unlock(&obj->mmo.lock);
 }
 
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_release_mmap_gtt(obj);
-	i915_gem_object_release_mmap_offset(obj);
-}
-
 static struct i915_mmap_offset *
 lookup_mmo(struct drm_i915_gem_object *obj,
 	   enum i915_mmap_type mmap_type)

drivers/gpu/drm/i915/gem/i915_gem_mman.h

@@ -24,8 +24,6 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
 			      struct drm_device *dev,
 			      u32 handle, u64 *offset);
 
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-
 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);

drivers/gpu/drm/i915/gem/i915_gem_object.c

@@ -171,14 +171,35 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
 	atomic_dec(&i915->mm.free_count);
 }
 
+static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
+{
+	/* Skip serialisation and waking the device if known to be not used. */
+
+	if (obj->userfault_count)
+		i915_gem_object_release_mmap_gtt(obj);
+
+	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
+		struct i915_mmap_offset *mmo, *mn;
+
+		i915_gem_object_release_mmap_offset(obj);
+
+		rbtree_postorder_for_each_entry_safe(mmo, mn,
+						     &obj->mmo.offsets,
+						     offset) {
+			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+					      &mmo->vma_node);
+			kfree(mmo);
+		}
+		obj->mmo.offsets = RB_ROOT;
+	}
+}
+
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				    struct llist_node *freed)
 {
 	struct drm_i915_gem_object *obj, *on;
 
 	llist_for_each_entry_safe(obj, on, freed, freed) {
-		struct i915_mmap_offset *mmo, *mn;
-
 		trace_i915_gem_object_destroy(obj);
 
 		if (!list_empty(&obj->vma.list)) {
@@ -204,18 +225,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 			spin_unlock(&obj->vma.lock);
 		}
 
-		i915_gem_object_release_mmap(obj);
+		__i915_gem_object_free_mmaps(obj);
 
-		rbtree_postorder_for_each_entry_safe(mmo, mn,
-						     &obj->mmo.offsets,
-						     offset) {
-			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
-					      &mmo->vma_node);
-			kfree(mmo);
-		}
-		obj->mmo.offsets = RB_ROOT;
-
-		GEM_BUG_ON(obj->userfault_count);
 		GEM_BUG_ON(!list_empty(&obj->lut_list));
 
 		atomic_set(&obj->mm.pages_pin_count, 0);