drm/i915: Pin fence for iomap
Acquire the fence register for the iomap in i915_vma_pin_iomap() on behalf of the caller. We probably want the caller to specify whether the fence should be pinned for their usage, but at the moment every caller either wants the associated fence or none at all, so take it on their behalf.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009084401.29090-1-chris@chris-wilson.co.uk
commit b4563f595e
parent 67e6456485
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -280,13 +280,16 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 {
        void __iomem *ptr;
+       int err;
 
        /* Access through the GTT requires the device to be awake. */
        assert_rpm_wakelock_held(vma->vm->i915);
 
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-       if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
-               return IO_ERR_PTR(-ENODEV);
+       if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+               err = -ENODEV;
+               goto err;
+       }
 
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
@@ -296,14 +299,38 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
                                        vma->node.start,
                                        vma->node.size);
-               if (ptr == NULL)
-                       return IO_ERR_PTR(-ENOMEM);
+               if (ptr == NULL) {
+                       err = -ENOMEM;
+                       goto err;
+               }
 
                vma->iomap = ptr;
        }
 
        __i915_vma_pin(vma);
+
+       err = i915_vma_get_fence(vma);
+       if (err)
+               goto err_unpin;
+
+       i915_vma_pin_fence(vma);
+
        return ptr;
+
+err_unpin:
+       __i915_vma_unpin(vma);
+err:
+       return IO_ERR_PTR(err);
+}
+
+void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+
+       GEM_BUG_ON(vma->iomap == NULL);
+
+       i915_vma_unpin_fence(vma);
+       i915_vma_unpin(vma);
 }
 
 void i915_vma_unpin_and_release(struct i915_vma **p_vma)
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -322,12 +322,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
  * Callers must hold the struct_mutex. This function is only valid to be
  * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
  */
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
-       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
-       GEM_BUG_ON(vma->iomap == NULL);
-       i915_vma_unpin(vma);
-}
+void i915_vma_unpin_iomap(struct i915_vma *vma);
 
 static inline struct page *i915_vma_first_page(struct i915_vma *vma)
 {
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -251,14 +251,6 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
                return PTR_ERR(io);
        }
 
-       err = i915_vma_get_fence(vma);
-       if (err) {
-               pr_err("Failed to get fence for partial view: offset=%lu\n",
-                      page);
-               i915_vma_unpin_iomap(vma);
-               return err;
-       }
-
        iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
        i915_vma_unpin_iomap(vma);
 
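The selftest hunk above shows the net effect for callers: the explicit i915_vma_get_fence() call disappears, because the fence is now taken inside i915_vma_pin_iomap() and dropped again in i915_vma_unpin_iomap(). A minimal caller-side sketch follows; the helper name and offset parameter are hypothetical, and struct_mutex plus a runtime-pm wakeref are assumed to be held, as the asserts in i915_vma_pin_iomap() require.

/*
 * Hypothetical illustration only, not part of this patch: write one
 * dword through a fenced GGTT mapping of @vma.  After this commit the
 * fence register is acquired and released by the pin/unpin calls.
 */
static int write_dword_via_ggtt(struct i915_vma *vma,
                                unsigned long offset, u32 value)
{
        void __iomem *io;

        io = i915_vma_pin_iomap(vma);  /* pins the vma and its fence */
        if (IS_ERR(io))
                return PTR_ERR(io);

        iowrite32(value, io + offset);

        i915_vma_unpin_iomap(vma);     /* drops the fence and the pin */
        return 0;
}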