// SPDX-License-Identifier: MIT
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include <drm/i915_drm.h>

#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	do {
		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
				     pages->sgl, pages->nents,
				     PCI_DMA_BIDIRECTIONAL,
				     DMA_ATTR_NO_WARN))
			return 0;

		/*
		 * If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT, NULL,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND));

	return -ENOSPC;
}

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct device *kdev = &dev_priv->drm.pdev->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps)) {
		/* XXX This does not prevent more requests being submitted! */
		if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
						     -MAX_SCHEDULE_TIMEOUT)) {
			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
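
/*
 * Illustrative sketch only (not part of the driver): a hypothetical page
 * backend pairing the two helpers above. Names are invented for the example
 * and the usual obj->mm bookkeeping is elided; the point is that a caller
 * simply propagates -ENOSPC once i915_gem_shrink() can reclaim nothing more.
 *
 *	static int example_map_pages(struct drm_i915_gem_object *obj,
 *				     struct sg_table *pages)
 *	{
 *		return i915_gem_gtt_prepare_pages(obj, pages);
 *	}
 *
 *	static void example_unmap_pages(struct drm_i915_gem_object *obj,
 *					struct sg_table *pages)
 *	{
 *		i915_gem_gtt_finish_pages(obj, pages);
 *	}
 */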

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
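
/*
 * Illustrative sketch only (not part of the driver): claiming a fixed GGTT
 * offset with i915_gem_gtt_reserve(). The node, offset, size and flags are
 * invented for the example; vm->mutex is taken because the helper feeds
 * drm_mm directly and, like i915_gem_gtt_insert() below, expects the
 * address space to be locked by the caller.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	mutex_lock(&ggtt->vm.mutex);
 *	err = i915_gem_gtt_reserve(&ggtt->vm, &node,
 *				   SZ_64K,		// size, page aligned
 *				   SZ_1M,		// exact offset to claim
 *				   I915_COLOR_UNEVICTABLE,
 *				   PIN_NOEVICT);	// fail instead of evicting
 *	mutex_unlock(&ggtt->vm.mutex);
 *	if (err)	// -ENOSPC: the range is already occupied
 *		return err;
 */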

static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}
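
/*
 * Worked example (numbers chosen for illustration): with start = 4096,
 * end = 65536, len = 8192 and align = 4096, the usable window is
 * round_down(65536 - 8192, 4096) - round_up(4096, 4096) = 57344 - 4096
 * = 53248 bytes. A random value reduced modulo that range is added to
 * start and rounded up to the alignment, so every result lies in
 * [4096, 57344] and leaves room for the full len before end.
 */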

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction; if that also fails, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->mutex);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGHEST;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (mode & DRM_MM_INSERT_ONCE) {
		err = drm_mm_insert_node_in_range(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  DRM_MM_INSERT_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/*
	 * No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)             (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTT, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOSEARCH)
		return -ENOSPC;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
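
/*
 * Illustrative sketch only (not part of the driver): asking for any suitably
 * aligned slot inside the mappable aperture via i915_gem_gtt_insert(). The
 * size, alignment and flags are invented for the example; the caller must
 * hold vm->mutex (see the lockdep assertion above), so the sketch takes it
 * around the call.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	mutex_lock(&ggtt->vm.mutex);
 *	err = i915_gem_gtt_insert(&ggtt->vm, &node,
 *				  SZ_2M,	// size
 *				  SZ_64K,	// power-of-two alignment
 *				  I915_COLOR_UNEVICTABLE,
 *				  0, ggtt->mappable_end,	// restrict to the aperture
 *				  PIN_MAPPABLE | PIN_NONBLOCK);
 *	mutex_unlock(&ggtt->vm.mutex);
 *	// On success, node.start holds the chosen offset; drm_mm_remove_node()
 *	// under the same mutex releases it again.
 */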

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_gtt.c"
#endif