drm/i915: Prepare for non-object vma
In order to allow ourselves to use the VMA to wrap entities other than GEM objects, we need to allow the vma->obj backpointer to be NULL. In most cases we know we are operating on a GEM object and its vma, but we need the core code (such as i915_vma_pin/insert/bind/unbind) to work regardless of the innards.

The remaining eyesore here is vma->obj->cache_level and related (but less of an issue) vma->obj->gt_ro. With a bit of care we should mirror those on the vma itself.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180607154047.9171-1-chris@chris-wilson.co.uk
parent 521370106d
commit 520ea7c581
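As a self-contained illustration of the pattern this patch applies throughout the diff below, the following sketch models a vma whose object backpointer may be NULL: the cache level is derived with a fallback of 0, and the object-side bookkeeping only runs behind an explicit check. The demo_obj/demo_vma types and the demo_vma_* helpers are made up for this example and are not the kernel's real i915 structures or functions.

/*
 * Minimal user-space sketch of the NULL-obj guard pattern, assuming made-up
 * demo_* types; every dereference of vma->obj is either guarded or replaced
 * by a value that does not need the object.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_obj {
	unsigned int cache_level;	/* stands in for obj->cache_level */
	unsigned int bind_count;	/* object-side bookkeeping */
};

struct demo_vma {
	struct demo_obj *obj;		/* may be NULL for a non-object vma */
	bool bound;
};

/* Derive the cache level without assuming an object is attached. */
static unsigned int demo_vma_cache_level(const struct demo_vma *vma)
{
	return vma->obj ? vma->obj->cache_level : 0;
}

/* Bind path: the core work never dereferences vma->obj unconditionally. */
static int demo_vma_bind(struct demo_vma *vma)
{
	unsigned int cache_level = demo_vma_cache_level(vma);

	printf("binding vma with cache_level=%u\n", cache_level);
	vma->bound = true;

	/* Object-only bookkeeping is skipped when there is no object. */
	if (vma->obj)
		vma->obj->bind_count++;

	return 0;
}

int main(void)
{
	struct demo_obj obj = { .cache_level = 2 };
	struct demo_vma obj_vma = { .obj = &obj };
	struct demo_vma bare_vma = { .obj = NULL };

	demo_vma_bind(&obj_vma);	/* object-backed vma: bookkeeping runs */
	demo_vma_bind(&bare_vma);	/* non-object vma: guard keeps it safe */

	printf("obj.bind_count=%u\n", obj.bind_count);
	return 0;
}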
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -3588,8 +3588,11 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 		if (!i915_vma_unbind(vma))
 			continue;

-		WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
-		WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+		WARN_ON(i915_vma_bind(vma,
+				      obj ? obj->cache_level : 0,
+				      PIN_UPDATE));
+		if (obj)
+			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
 	}

 	ggtt->vm.closed = false;
drivers/gpu/drm/i915/i915_gpu_error.c

@@ -1050,6 +1050,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
 	int i = 0;

 	list_for_each_entry(vma, head, vm_link) {
+		if (!vma->obj)
+			continue;
+
 		if (pinned_only && !i915_vma_is_pinned(vma))
 			continue;

drivers/gpu/drm/i915/i915_vma.c

@@ -345,7 +345,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)

 void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

 	GEM_BUG_ON(vma->iomap == NULL);

@@ -365,6 +365,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
 		return;

 	obj = vma->obj;
+	GEM_BUG_ON(!obj);

 	i915_vma_unpin(vma);
 	i915_vma_close(vma);
@@ -489,7 +490,7 @@ static int
 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct drm_i915_private *dev_priv = vma->vm->i915;
-	struct drm_i915_gem_object *obj = vma->obj;
+	unsigned int cache_level;
 	u64 start, end;
 	int ret;

@@ -524,16 +525,21 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
-			  size, obj->base.size,
-			  flags & PIN_MAPPABLE ? "mappable" : "total",
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
+			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
 		return -ENOSPC;
 	}

-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ret;
+	if (vma->obj) {
+		ret = i915_gem_object_pin_pages(vma->obj);
+		if (ret)
+			return ret;
+
+		cache_level = vma->obj->cache_level;
+	} else {
+		cache_level = 0;
+	}

 	GEM_BUG_ON(vma->pages);

@@ -550,7 +556,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		}

 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
-					   size, offset, obj->cache_level,
+					   size, offset, cache_level,
 					   flags);
 		if (ret)
 			goto err_clear;
@@ -589,7 +595,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		}

 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
-					  size, alignment, obj->cache_level,
+					  size, alignment, cache_level,
 					  start, end, flags);
 		if (ret)
 			goto err_clear;
@@ -598,23 +604,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

-	spin_lock(&dev_priv->mm.obj_lock);
-	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
-	obj->bind_count++;
-	spin_unlock(&dev_priv->mm.obj_lock);
+	if (vma->obj) {
+		struct drm_i915_gem_object *obj = vma->obj;

-	assert_bind_count(obj);
+		spin_lock(&dev_priv->mm.obj_lock);
+		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+		obj->bind_count++;
+		spin_unlock(&dev_priv->mm.obj_lock);
+
+		assert_bind_count(obj);
+	}

 	return 0;

 err_clear:
 	vma->vm->clear_pages(vma);
 err_unpin:
-	i915_gem_object_unpin_pages(obj);
+	if (vma->obj)
+		i915_gem_object_unpin_pages(vma->obj);
 	return ret;
 }
@@ -622,7 +633,6 @@ static void
 i915_vma_remove(struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
-	struct drm_i915_gem_object *obj = vma->obj;

 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
@@ -632,20 +642,26 @@ i915_vma_remove(struct i915_vma *vma)
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

-	/* Since the unbound list is global, only move to that list if
+	/*
+	 * Since the unbound list is global, only move to that list if
 	 * no more VMAs exist.
 	 */
-	spin_lock(&i915->mm.obj_lock);
-	if (--obj->bind_count == 0)
-		list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
-	spin_unlock(&i915->mm.obj_lock);
+	if (vma->obj) {
+		struct drm_i915_gem_object *obj = vma->obj;

-	/* And finally now the object is completely decoupled from this vma,
-	 * we can drop its hold on the backing storage and allow it to be
-	 * reaped by the shrinker.
-	 */
-	i915_gem_object_unpin_pages(obj);
-	assert_bind_count(obj);
+		spin_lock(&i915->mm.obj_lock);
+		if (--obj->bind_count == 0)
+			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+		spin_unlock(&i915->mm.obj_lock);
+
+		/*
+		 * And finally now the object is completely decoupled from this
+		 * vma, we can drop its hold on the backing storage and allow
+		 * it to be reaped by the shrinker.
+		 */
+		i915_gem_object_unpin_pages(obj);
+		assert_bind_count(obj);
+	}
 }

 int __i915_vma_do_pin(struct i915_vma *vma,
@@ -670,7 +686,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

-	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
 	if (ret)
 		goto err_remove;

@@ -727,6 +743,7 @@ void i915_vma_reopen(struct i915_vma *vma)

 static void __i915_vma_destroy(struct i915_vma *vma)
 {
+	struct drm_i915_private *i915 = vma->vm->i915;
 	int i;

 	GEM_BUG_ON(vma->node.allocated);
@@ -738,12 +755,13 @@ static void __i915_vma_destroy(struct i915_vma *vma)

 	list_del(&vma->obj_link);
 	list_del(&vma->vm_link);
-	rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+	if (vma->obj)
+		rb_erase(&vma->obj_node, &vma->obj->vma_tree);

 	if (!i915_vma_is_ggtt(vma))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

-	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+	kmem_cache_free(i915->vmas, vma);
 }

 void i915_vma_destroy(struct i915_vma *vma)
@@ -809,13 +827,13 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)

 int i915_vma_unbind(struct i915_vma *vma)
 {
-	struct drm_i915_gem_object *obj = vma->obj;
 	unsigned long active;
 	int ret;

-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

-	/* First wait upon any activity as retiring the request may
+	/*
+	 * First wait upon any activity as retiring the request may
 	 * have side-effects such as unpinning or even unbinding this vma.
 	 */
 	might_sleep();
@@ -823,7 +841,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	if (active) {
 		int idx;

-		/* When a closed VMA is retired, it is unbound - eek.
+		/*
+		 * When a closed VMA is retired, it is unbound - eek.
 		 * In order to prevent it from being recursively closed,
 		 * take a pin on the vma so that the second unbind is
 		 * aborted.
@@ -861,9 +880,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;

-	GEM_BUG_ON(obj->bind_count == 0);
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/*
 		 * Check that we have flushed all writes through the GGTT
drivers/gpu/drm/i915/i915_vma.h

@@ -407,7 +407,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
 static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
-	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+	/* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
 	if (vma->fence)
 		__i915_vma_unpin_fence(vma);
 }