drm/i915: use might_lock_nested in get_pages annotation
So strictly speaking the existing annotation is also ok, because we have
a chain of

  obj->mm.lock#I915_MM_GET_PAGES -> fs_reclaim -> obj->mm.lock

(the shrinker cannot get at an object while we're in get_pages, hence
this is safe). But it's confusing, so take the right subclass of the
lock instead. This does reduce our lockdep-based checking a bit, but
it's also less fragile in case we ever change the nesting around.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191104173720.2696-3-daniel.vetter@ffwll.ch
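For illustration only (this is not part of the patch or of the i915 code), here is a minimal sketch of the locking pattern the annotation describes. The names toy_obj, toy_get_pages(), toy_pin_pages() and TOY_MM_GET_PAGES are made up for the example; might_lock_nested(), mutex_lock_nested() and atomic_inc_not_zero() are the real kernel primitives, with might_lock_nested() assumed to be available from the companion lockdep patch in this series:

#include <linux/atomic.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

enum toy_mm_subclass {
	TOY_MM_NORMAL = 0,
	TOY_MM_GET_PAGES = 1,	/* lock taken while the object has no pages yet */
};

struct toy_obj {
	struct mutex mm_lock;		/* stands in for obj->mm.lock */
	atomic_t pages_pin_count;
};

/*
 * Slow path: allocate backing pages. The lock is taken with the
 * GET_PAGES subclass because the object has no pages yet, so the
 * shrinker cannot reach it, and the allocation below may itself
 * recurse into reclaim (fs_reclaim).
 */
static int toy_get_pages(struct toy_obj *obj)
{
	mutex_lock_nested(&obj->mm_lock, TOY_MM_GET_PAGES);
	/* ... allocate pages, possibly entering direct reclaim ... */
	atomic_inc(&obj->pages_pin_count);
	mutex_unlock(&obj->mm_lock);
	return 0;
}

/*
 * Fast path: tell lockdep which subclass we would take, so the
 * annotation matches what toy_get_pages() really does even when the
 * atomic fast path never touches the mutex at all.
 */
static inline int toy_pin_pages(struct toy_obj *obj)
{
	might_lock_nested(&obj->mm_lock, TOY_MM_GET_PAGES);

	if (atomic_inc_not_zero(&obj->pages_pin_count))
		return 0;

	return toy_get_pages(obj);
}

The point of the change is simply that the fast path's might_lock_nested() names the same subclass the slow path actually uses, so lockdep checks a consistent nesting even when the mutex is never taken.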
commit 74ceefd10b
parent e692b4021a
@@ -271,10 +271,27 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
+	I915_MM_NORMAL = 0,
+	/*
+	 * Only used by struct_mutex, when called "recursively" from
+	 * direct-reclaim-esque. Safe because there is only ever one
+	 * struct_mutex in the entire system.
+	 */
+	I915_MM_SHRINKER = 1,
+	/*
+	 * Used for obj->mm.lock when allocating pages. Safe because the object
+	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
+	 * it. As soon as the object has pages, obj->mm.lock nests within
+	 * fs_reclaim.
+	 */
+	I915_MM_GET_PAGES = 1,
+};
+
 static inline int __must_check
 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	might_lock(&obj->mm.lock);
+	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
 
 	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
 		return 0;
@@ -317,23 +334,6 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 	__i915_gem_object_unpin_pages(obj);
 }
 
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
-	I915_MM_NORMAL = 0,
-	/*
-	 * Only used by struct_mutex, when called "recursively" from
-	 * direct-reclaim-esque. Safe because there is only ever one
-	 * struct_mutex in the entire system.
-	 */
-	I915_MM_SHRINKER = 1,
-	/*
-	 * Used for obj->mm.lock when allocating pages. Safe because the object
-	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
-	 * it. As soon as the object has pages, obj->mm.lock nests within
-	 * fs_reclaim.
-	 */
-	I915_MM_GET_PAGES = 1,
-};
-
 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj);