linux_dsm_epyc7002/drivers/gpu/drm/i915/i915_vma.c
Chris Wilson e61e0f51ba drm/i915: Rename drm_i915_gem_request to i915_request
We want to de-emphasize the link between the request (dependency,
execution and fence tracking) and GEM, and so rename the struct from
drm_i915_gem_request to i915_request. That is, we may implement the GEM
user interface on top of requests, but they are an abstraction for
tracking execution rather than an implementation detail of GEM. (Since
they are not tied to HW, we keep the i915 prefix as opposed to intel.)

In short, the spatch:
@@

@@
- struct drm_i915_gem_request
+ struct i915_request

As a corollary to contracting the type name, we also harmonise on using
the 'rq' shorthand for local variables where space is of the essence and
repetition makes 'request' unwieldy. For globals and struct members,
'request' is still much preferred for its clarity.
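For example, a local variable that used to be declared as
'struct drm_i915_gem_request *request' is now a 'struct i915_request *rq',
while globals and struct members keep the longer 'request' spelling.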

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180221095636.6649-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
2018-02-21 20:57:22 +00:00


/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "i915_vma.h"
#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"
#include <drm/drm_gem.h>
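/*
* i915_vma_retire() is the i915_gem_active retire callback installed on the
* vma's per-engine last_read trackers. It runs when the last request using
* the vma on that engine is retired: the engine's active bit is cleared and,
* once the vma is idle on all engines, it is moved to the vm's inactive list
* (and unbound if it was already closed). When the owning object's last
* active vma retires, the shared fences are pruned, the object is bumped on
* the bound list and any deferred active reference is dropped.
*/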
static void
i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
{
const unsigned int idx = rq->engine->id;
struct i915_vma *vma =
container_of(active, struct i915_vma, last_read[idx]);
struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
i915_vma_clear_active(vma, idx);
if (i915_vma_is_active(vma))
return;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
WARN_ON(i915_vma_unbind(vma));
GEM_BUG_ON(!i915_gem_object_is_active(obj));
if (--obj->active_count)
return;
/* Prune the shared fence arrays iff completely idle (inc. external) */
if (reservation_object_trylock(obj->resv)) {
if (reservation_object_test_signaled_rcu(obj->resv, true))
reservation_object_add_excl_fence(obj->resv, NULL);
reservation_object_unlock(obj->resv);
}
/* Bump our place on the bound list to keep it roughly in LRU order
* so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!)
*/
spin_lock(&rq->i915->mm.obj_lock);
if (obj->bind_count)
list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
spin_unlock(&rq->i915->mm.obj_lock);
obj->mm.dirty = true; /* be paranoid */
if (i915_gem_object_has_active_reference(obj)) {
i915_gem_object_clear_active_reference(obj);
i915_gem_object_put(obj);
}
}
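/*
* vma_create() allocates a new vma for @obj in @vm from the vmas slab,
* computes its size (honouring partial and rotated GGTT views) and, for
* GGTT vmas, its fence size/alignment, then links it into the object's
* vma list and rbtree and onto the vm's unbound list.
*/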
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
struct rb_node *rb, **p;
int i;
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
init_request_active(&vma->last_read[i], i915_vma_retire);
init_request_active(&vma->last_fence, NULL);
vma->vm = vm;
vma->obj = obj;
vma->resv = obj->resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
if (view && view->type != I915_GGTT_VIEW_NORMAL) {
vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) {
GEM_BUG_ON(range_overflows_t(u64,
view->partial.offset,
view->partial.size,
obj->base.size >> PAGE_SHIFT));
vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
GEM_BUG_ON(vma->size >= obj->base.size);
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
}
}
if (unlikely(vma->size > vm->total))
goto err_vma;
GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
if (i915_is_ggtt(vm)) {
if (unlikely(overflows_type(vma->size, u32)))
goto err_vma;
vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
i915_gem_object_get_tiling(obj),
i915_gem_object_get_stride(obj));
if (unlikely(vma->fence_size < vma->size || /* overflow */
vma->fence_size > vm->total))
goto err_vma;
GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
i915_gem_object_get_tiling(obj),
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
/*
* We put the GGTT vma at the start of the vma-list, followed
* by the ppGTT vma. This allows us to break early when
* iterating over only the GGTT vma for an object, see
* for_each_ggtt_vma()
*/
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
list_add_tail(&vma->obj_link, &obj->vma_list);
}
rb = NULL;
p = &obj->vma_tree.rb_node;
while (*p) {
struct i915_vma *pos;
rb = *p;
pos = rb_entry(rb, struct i915_vma, obj_node);
if (i915_vma_compare(pos, vm, view) < 0)
p = &rb->rb_right;
else
p = &rb->rb_left;
}
rb_link_node(&vma->obj_node, rb, p);
rb_insert_color(&vma->obj_node, &obj->vma_tree);
list_add(&vma->vm_link, &vm->unbound_list);
return vma;
err_vma:
kmem_cache_free(vm->i915->vmas, vma);
return ERR_PTR(-E2BIG);
}
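/*
* vma_lookup() walks the object's vma rbtree, keyed by i915_vma_compare()
* (i.e. by address space and GGTT view), and returns the matching vma, or
* NULL if the object has not yet been instantiated in that vm/view.
*/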
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct rb_node *rb;
rb = obj->vma_tree.rb_node;
while (rb) {
struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
long cmp;
cmp = i915_vma_compare(vma, vm, view);
if (cmp == 0)
return vma;
if (cmp < 0)
rb = rb->rb_right;
else
rb = rb->rb_left;
}
return NULL;
}
/**
* i915_vma_instance - return the singleton instance of the VMA
* @obj: parent &struct drm_i915_gem_object to be mapped
* @vm: address space in which the mapping is located
* @view: additional mapping requirements
*
* i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
* the same @view characteristics. If a match is not found, one is created.
* Once created, the VMA is kept until either the object is freed, or the
* address space is closed.
*
* Must be called with struct_mutex held.
*
* Returns the vma, or an error pointer.
*/
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
GEM_BUG_ON(vm->closed);
vma = vma_lookup(obj, vm, view);
if (!vma)
vma = vma_create(obj, vm, view);
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
return vma;
}
/**
* i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
* @cache_level: mapping cache level
* @flags: flags like global or local mapping
*
* DMA addresses are taken from the scatter-gather table of this object (or of
* this VMA in case of non-default GGTT views) and the PTE entries are set up.
* Note that DMA addresses are also the only part of the SG table we care about.
*/
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags)
{
u32 bind_flags;
u32 vma_flags;
int ret;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > vma->node.size);
if (GEM_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
vma->vm->total)))
return -ENODEV;
if (GEM_WARN_ON(!flags))
return -EINVAL;
bind_flags = 0;
if (flags & PIN_GLOBAL)
bind_flags |= I915_VMA_GLOBAL_BIND;
if (flags & PIN_USER)
bind_flags |= I915_VMA_LOCAL_BIND;
vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
if (flags & PIN_UPDATE)
bind_flags |= vma_flags;
else
bind_flags &= ~vma_flags;
if (bind_flags == 0)
return 0;
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags);
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
vma->flags |= bind_flags;
return 0;
}
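/**
* i915_vma_pin_iomap - map a GGTT vma through the mappable aperture
* @vma: VMA to iomap
*
* The vma must already be bound into the global GTT and be map-and-fenceable.
* On success an extra pin and a fence are taken for the returned mapping;
* release both with a matching i915_vma_unpin_iomap(). Callers must hold
* struct_mutex and a runtime pm wakeref.
*
* Returns a WC iomapping of the vma, or an IO_ERR_PTR() encoded error.
*/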
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
void __iomem *ptr;
int err;
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(vma->vm->i915);
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
err = -ENODEV;
goto err;
}
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
ptr = vma->iomap;
if (ptr == NULL) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
vma->node.size);
if (ptr == NULL) {
err = -ENOMEM;
goto err;
}
vma->iomap = ptr;
}
__i915_vma_pin(vma);
err = i915_vma_pin_fence(vma);
if (err)
goto err_unpin;
i915_vma_set_ggtt_write(vma);
return ptr;
err_unpin:
__i915_vma_unpin(vma);
err:
return IO_ERR_PTR(err);
}
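/*
* Flush any writes tracked against this vma's GGTT mapping (the GGTT-write
* flag set by i915_vma_set_ggtt_write()) so that they land before the
* mapping is reused or torn down, then clear the flag.
*/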
void i915_vma_flush_writes(struct i915_vma *vma)
{
if (!i915_vma_has_ggtt_write(vma))
return;
i915_gem_flush_ggtt_writes(vma->vm->i915);
i915_vma_unset_ggtt_write(vma);
}
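/**
* i915_vma_unpin_iomap - undo a previous i915_vma_pin_iomap()
* @vma: VMA to unpin
*
* Flushes any pending GGTT writes, then drops the fence and the pin taken by
* i915_vma_pin_iomap(). The iomapping itself stays cached on the vma and is
* only torn down when the vma is unbound.
*/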
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
i915_vma_flush_writes(vma);
i915_vma_unpin_fence(vma);
i915_vma_unpin(vma);
}
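/*
* Unpin and close the vma pointed to by *p_vma (clearing the pointer), then
* drop the reference on the backing object unless it is still active.
*/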
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
vma = fetch_and_zero(p_vma);
if (!vma)
return;
obj = vma->obj;
i915_vma_unpin(vma);
i915_vma_close(vma);
__i915_gem_object_release_unless_active(obj);
}
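/*
* i915_vma_misplaced() checks whether the vma's current binding satisfies
* the given size, alignment and placement constraints; it returns true if
* the vma would have to be rebound to honour them.
*/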
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags)
{
if (!drm_mm_node_allocated(&vma->node))
return false;
if (vma->node.size < size)
return true;
GEM_BUG_ON(alignment && !is_power_of_2(alignment));
if (alignment && !IS_ALIGNED(vma->node.start, alignment))
return true;
if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
return true;
if (flags & PIN_OFFSET_BIAS &&
vma->node.start < (flags & PIN_OFFSET_MASK))
return true;
if (flags & PIN_OFFSET_FIXED &&
vma->node.start != (flags & PIN_OFFSET_MASK))
return true;
return false;
}
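/*
* Recompute whether this GGTT vma lies within the mappable aperture and
* meets the fence size/alignment requirements, updating the
* I915_VMA_CAN_FENCE flag accordingly.
*/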
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
bool mappable, fenceable;
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!vma->fence_size);
/*
* Explicitly disable for rotated VMA since the display does not
* need the fence and the VMA is not accessible to other users.
*/
if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
return;
fenceable = (vma->node.size >= vma->fence_size &&
IS_ALIGNED(vma->node.start, vma->fence_alignment));
mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
if (mappable && fenceable)
vma->flags |= I915_VMA_CAN_FENCE;
else
vma->flags &= ~I915_VMA_CAN_FENCE;
}
static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
return node->allocated && node->color != color;
}
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
struct drm_mm_node *node = &vma->node;
struct drm_mm_node *other;
/*
* On some machines we have to be careful when putting differing types
* of snoopable memory together to avoid the prefetcher crossing memory
* domains and dying. During vm initialisation, we decide whether or not
* these constraints apply and set the drm_mm.color_adjust
* appropriately.
*/
if (vma->vm->mm.color_adjust == NULL)
return true;
/* Only valid to be called on an already inserted vma */
GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(list_empty(&node->node_list));
other = list_prev_entry(node, node_list);
if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
return false;
other = list_next_entry(node, node_list);
if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
return false;
return true;
}
/**
* i915_vma_insert - finds a slot for the vma in its address space
* @vma: the vma
* @size: requested size in bytes (can be larger than the VMA)
* @alignment: required alignment
* @flags: mask of PIN_* flags to use
*
* First we try to allocate some free space that meets the requirements for
* the VMA. Failing that, if the flags permit, we evict an old VMA,
* preferably the oldest idle entry, to make room for the new VMA.
*
* Returns:
* 0 on success, negative error code otherwise.
*/
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *dev_priv = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
u64 start, end;
int ret;
GEM_BUG_ON(i915_vma_is_closed(vma));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
alignment = max(alignment, vma->display_alignment);
if (flags & PIN_MAPPABLE) {
size = max_t(typeof(size), size, vma->fence_size);
alignment = max_t(typeof(alignment),
alignment, vma->fence_alignment);
}
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(!is_power_of_2(alignment));
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
end = vma->vm->total;
if (flags & PIN_MAPPABLE)
end = min_t(u64, end, dev_priv->ggtt.mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
/* If binding the object/GGTT view requires more space than the entire
* aperture has, reject it early before evicting everything in a vain
* attempt to find space.
*/
if (size > end) {
DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
size, obj->base.size,
flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return -ENOSPC;
}
ret = i915_gem_object_pin_pages(obj);
if (ret)
return ret;
GEM_BUG_ON(vma->pages);
ret = vma->vm->set_pages(vma);
if (ret)
goto err_unpin;
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
if (!IS_ALIGNED(offset, alignment) ||
range_overflows(offset, size, end)) {
ret = -EINVAL;
goto err_clear;
}
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
size, offset, obj->cache_level,
flags);
if (ret)
goto err_clear;
} else {
/*
* We only support huge gtt pages through the 48b PPGTT,
* however we also don't want to force any alignment for
* objects which need to be tightly packed into the low 32bits.
*
* Note that we assume that the GGTT is limited to 4GiB for the
* foreseeable future. See also i915_ggtt_offset().
*/
if (upper_32_bits(end - 1) &&
vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
/*
* We can't mix 64K and 4K PTEs in the same page-table
* (2M block), and so to avoid the ugliness and
* complexity of coloring we opt for just aligning 64K
* objects to 2M.
*/
u64 page_alignment =
rounddown_pow_of_two(vma->page_sizes.sg |
I915_GTT_PAGE_SIZE_2M);
/*
* Check we don't expand for the limited Global GTT
* (mappable aperture is even more precious!). This
* also checks that we exclude the aliasing-ppgtt.
*/
GEM_BUG_ON(i915_vma_is_ggtt(vma));
alignment = max(alignment, page_alignment);
if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
}
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
size, alignment, obj->cache_level,
start, end, flags);
if (ret)
goto err_clear;
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
obj->bind_count++;
spin_unlock(&dev_priv->mm.obj_lock);
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
return 0;
err_clear:
vma->vm->clear_pages(vma);
err_unpin:
i915_gem_object_unpin_pages(obj);
return ret;
}
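/*
* i915_vma_remove() is the counterpart of i915_vma_insert(): it releases the
* drm_mm node, moves the vma back to the vm's unbound list and drops the pin
* on the object's backing pages taken at insert time.
*/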
static void
i915_vma_remove(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
vma->vm->clear_pages(vma);
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
/* Since the unbound list is global, only move to that list if
* no more VMAs exist.
*/
spin_lock(&i915->mm.obj_lock);
if (--obj->bind_count == 0)
list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
spin_unlock(&i915->mm.obj_lock);
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
* reaped by the shrinker.
*/
i915_gem_object_unpin_pages(obj);
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
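/*
* __i915_vma_do_pin() is the slow path of i915_vma_pin(): with the pin count
* already raised by the caller, it inserts the vma into its address space if
* it is not yet bound and then (re)binds it with the requested GLOBAL/USER
* flags, undoing the pin on failure.
*/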
int __i915_vma_do_pin(struct i915_vma *vma,
u64 size, u64 alignment, u64 flags)
{
const unsigned int bound = vma->flags;
int ret;
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
ret = -EBUSY;
goto err_unpin;
}
if ((bound & I915_VMA_BIND_MASK) == 0) {
ret = i915_vma_insert(vma, size, alignment, flags);
if (ret)
goto err_unpin;
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
if (ret)
goto err_remove;
GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
err_remove:
if ((bound & I915_VMA_BIND_MASK) == 0) {
i915_vma_remove(vma);
GEM_BUG_ON(vma->pages);
GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
}
err_unpin:
__i915_vma_unpin(vma);
return ret;
}
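/*
* Final destruction of a closed, idle and unbound vma: unlink it from the
* object and vm lists, drop the ppGTT reference taken at creation (if any)
* and return the vma to the slab cache.
*/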
static void i915_vma_destroy(struct i915_vma *vma)
{
int i;
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(i915_vma_is_active(vma));
GEM_BUG_ON(!i915_vma_is_closed(vma));
GEM_BUG_ON(vma->fence);
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
list_del(&vma->obj_link);
list_del(&vma->vm_link);
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
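/*
* i915_vma_close() marks the vma as closed and removes it from the object's
* lookup tree; the vma is destroyed as soon as it is idle and unbound
* (immediately, if it is already inactive and unpinned).
*/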
void i915_vma_close(struct i915_vma *vma)
{
GEM_BUG_ON(i915_vma_is_closed(vma));
vma->flags |= I915_VMA_CLOSED;
rb_erase(&vma->obj_node, &vma->obj->vma_tree);
if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
WARN_ON(i915_vma_unbind(vma));
}
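/*
* Tear down the cached aperture iomapping, if any, created by
* i915_vma_pin_iomap().
*/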
static void __i915_vma_iounmap(struct i915_vma *vma)
{
GEM_BUG_ON(i915_vma_is_pinned(vma));
if (vma->iomap == NULL)
return;
io_mapping_unmap(vma->iomap);
vma->iomap = NULL;
}
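/*
* i915_vma_revoke_mmap() zaps any userspace PTEs pointing at this GGTT vma
* (established through a GTT mmap fault) so that the next user access faults
* again and re-establishes domain tracking.
*/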
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
u64 vma_offset;
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (!i915_vma_has_userfault(vma))
return;
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(!vma->obj->userfault_count);
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
1);
i915_vma_unset_userfault(vma);
if (!--vma->obj->userfault_count)
list_del(&vma->obj->userfault_link);
}
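/*
* i915_vma_unbind() waits for the vma to become idle, revokes any user
* mappings, fence and iomapping, clears its PTEs and removes it from its
* address space. Returns -EBUSY if the vma is still pinned.
*/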
int i915_vma_unbind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
unsigned long active;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
/* First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
*/
might_sleep();
active = i915_vma_get_active(vma);
if (active) {
int idx;
/* When a closed VMA is retired, it is unbound - eek.
* In order to prevent it from being recursively closed,
* take a pin on the vma so that the second unbind is
* aborted.
*
* Even more scary is that the retire callback may free
* the object (last active vma). To prevent the explosion
* we defer the actual object free to a worker that can
* only proceed once it acquires the struct_mutex (which
* we currently hold, therefore it cannot free this object
* before we are finished).
*/
__i915_vma_pin(vma);
for_each_active(active, idx) {
ret = i915_gem_active_retire(&vma->last_read[idx],
&vma->vm->i915->drm.struct_mutex);
if (ret)
break;
}
if (!ret) {
ret = i915_gem_active_retire(&vma->last_fence,
&vma->vm->i915->drm.struct_mutex);
}
__i915_vma_unpin(vma);
if (ret)
return ret;
}
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma))
return -EBUSY;
if (!drm_mm_node_allocated(&vma->node))
goto destroy;
GEM_BUG_ON(obj->bind_count == 0);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
if (i915_vma_is_map_and_fenceable(vma)) {
/*
* Check that we have flushed all writes through the GGTT
* before the unbind; otherwise, due to the non-strict nature of
* those indirect writes, they may end up referencing the GGTT PTE
* after the unbind.
*/
i915_vma_flush_writes(vma);
GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
/* release the fence reg _after_ flushing */
ret = i915_vma_put_fence(vma);
if (ret)
return ret;
/* Force a pagefault for domain tracking on next user access */
i915_vma_revoke_mmap(vma);
__i915_vma_iounmap(vma);
vma->flags &= ~I915_VMA_CAN_FENCE;
}
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
if (likely(!vma->vm->closed)) {
trace_i915_vma_unbind(vma);
vma->vm->unbind_vma(vma);
}
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
i915_vma_remove(vma);
destroy:
if (unlikely(i915_vma_is_closed(vma)))
i915_vma_destroy(vma);
return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif