drm/i915: Move context management under GEM

Keep track of the GEM contexts underneath i915->gem.contexts and assign
them their own lock for the purposes of list management.

v2: Focus on lock tracking; ctx->vm is protected by ctx->mutex
v3: Correct split with removal of logical HW ID

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-15-chris@chris-wilson.co.uk
Chris Wilson <chris@chris-wilson.co.uk>
2019-10-04 14:40:09 +01:00
parent 2935ed5339
commit a4e7ccdac3
28 changed files with 394 additions and 354 deletions
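
A reader's aid, not part of the patch as posted: the shape of the new
bookkeeping can be reconstructed from init_contexts() and the
i915->gem.contexts accesses in the diff below. A minimal sketch in C; the
field comments are my own annotation:

	struct i915_gem_contexts {
		spinlock_t lock;	/* guards list */
		struct list_head list;	/* all i915_gem_context, via ctx->link */

		/*
		 * Contexts are not freed inline: the final kref_put() pushes
		 * the context onto free_list and schedules free_work, which
		 * reaps the whole llist (see i915_gem_context_release()).
		 */
		struct llist_head free_list;
		struct work_struct free_work;
	};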


@@ -218,9 +218,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+	spin_lock(&ctx->i915->gem.contexts.lock);
+	list_del(&ctx->link);
+	spin_unlock(&ctx->i915->gem.contexts.lock);
+
 	free_engines(rcu_access_pointer(ctx->engines));
 	mutex_destroy(&ctx->engines_mutex);
@@ -230,67 +233,54 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	kfree(ctx->name);
 	put_pid(ctx->pid);
 
-	list_del(&ctx->link);
-
 	mutex_destroy(&ctx->mutex);
 
 	kfree_rcu(ctx, rcu);
 }
 
-static void contexts_free(struct drm_i915_private *i915)
+static void contexts_free_all(struct llist_node *list)
 {
-	struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
 	struct i915_gem_context *ctx, *cn;
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	llist_for_each_entry_safe(ctx, cn, freed, free_link)
+	llist_for_each_entry_safe(ctx, cn, list, free_link)
 		i915_gem_context_free(ctx);
 }
 
-static void contexts_free_first(struct drm_i915_private *i915)
+static void contexts_flush_free(struct i915_gem_contexts *gc)
 {
-	struct i915_gem_context *ctx;
-	struct llist_node *freed;
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
-	freed = llist_del_first(&i915->contexts.free_list);
-	if (!freed)
-		return;
-
-	ctx = container_of(freed, typeof(*ctx), free_link);
-	i915_gem_context_free(ctx);
+	contexts_free_all(llist_del_all(&gc->free_list));
 }
 
 static void contexts_free_worker(struct work_struct *work)
 {
-	struct drm_i915_private *i915 =
-		container_of(work, typeof(*i915), contexts.free_work);
+	struct i915_gem_contexts *gc =
+		container_of(work, typeof(*gc), free_work);
 
-	mutex_lock(&i915->drm.struct_mutex);
-	contexts_free(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
+	contexts_flush_free(gc);
 }
 
 void i915_gem_context_release(struct kref *ref)
 {
 	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
-	struct drm_i915_private *i915 = ctx->i915;
+	struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
 
 	trace_i915_context_free(ctx);
-	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
-		queue_work(i915->wq, &i915->contexts.free_work);
+	if (llist_add(&ctx->free_link, &gc->free_list))
+		schedule_work(&gc->free_work);
 }
 
 static void context_close(struct i915_gem_context *ctx)
 {
+	struct i915_address_space *vm;
+
 	i915_gem_context_set_closed(ctx);
 
-	if (ctx->vm)
-		i915_vm_close(ctx->vm);
-
 	mutex_lock(&ctx->mutex);
 
+	vm = i915_gem_context_vm(ctx);
+	if (vm)
+		i915_vm_close(vm);
+
 	ctx->file_priv = ERR_PTR(-EBADF);
 
 	/*
@@ -317,7 +307,6 @@ __create_context(struct drm_i915_private *i915)
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ctx->ref);
-	list_add_tail(&ctx->link, &i915->contexts.list);
 	ctx->i915 = i915;
 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 	mutex_init(&ctx->mutex);
@@ -343,6 +332,10 @@ __create_context(struct drm_i915_private *i915)
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
+	spin_lock(&i915->gem.contexts.lock);
+	list_add_tail(&ctx->link, &i915->gem.contexts.list);
+	spin_unlock(&i915->gem.contexts.lock);
+
 	return ctx;
 
 err_free:
@@ -372,11 +365,11 @@ static void __apply_ppgtt(struct intel_context *ce, void *vm)
 static struct i915_address_space *
 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
-	struct i915_address_space *old = ctx->vm;
+	struct i915_address_space *old = i915_gem_context_vm(ctx);
 
 	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
 
-	ctx->vm = i915_vm_open(vm);
+	rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
 	context_apply_all(ctx, __apply_ppgtt, vm);
 
 	return old;
@@ -385,7 +378,7 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 static void __assign_ppgtt(struct i915_gem_context *ctx,
 			   struct i915_address_space *vm)
 {
-	if (vm == ctx->vm)
+	if (vm == rcu_access_pointer(ctx->vm))
 		return;
 
 	vm = __set_ppgtt(ctx, vm);
@@ -417,27 +410,25 @@ static void __assign_timeline(struct i915_gem_context *ctx,
 }
 
 static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
+i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 {
 	struct i915_gem_context *ctx;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
-	    !HAS_EXECLISTS(dev_priv))
+	    !HAS_EXECLISTS(i915))
 		return ERR_PTR(-EINVAL);
 
-	/* Reap the most stale context */
-	contexts_free_first(dev_priv);
+	/* Reap the stale contexts */
+	contexts_flush_free(&i915->gem.contexts);
 
-	ctx = __create_context(dev_priv);
+	ctx = __create_context(i915);
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (HAS_FULL_PPGTT(dev_priv)) {
+	if (HAS_FULL_PPGTT(i915)) {
 		struct i915_ppgtt *ppgtt;
 
-		ppgtt = i915_ppgtt_create(dev_priv);
+		ppgtt = i915_ppgtt_create(i915);
 		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
@@ -445,14 +436,17 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 			return ERR_CAST(ppgtt);
 		}
 
+		mutex_lock(&ctx->mutex);
 		__assign_ppgtt(ctx, &ppgtt->vm);
+		mutex_unlock(&ctx->mutex);
+
 		i915_vm_put(&ppgtt->vm);
 	}
 
 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
 		struct intel_timeline *timeline;
 
-		timeline = intel_timeline_create(&dev_priv->gt, NULL);
+		timeline = intel_timeline_create(&i915->gt, NULL);
 		if (IS_ERR(timeline)) {
 			context_close(ctx);
 			return ERR_CAST(timeline);
@@ -497,42 +491,40 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 	return ctx;
 }
 
-static void init_contexts(struct drm_i915_private *i915)
+static void init_contexts(struct i915_gem_contexts *gc)
 {
-	mutex_init(&i915->contexts.mutex);
-	INIT_LIST_HEAD(&i915->contexts.list);
+	spin_lock_init(&gc->lock);
+	INIT_LIST_HEAD(&gc->list);
 
-	INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
-	init_llist_head(&i915->contexts.free_list);
+	INIT_WORK(&gc->free_work, contexts_free_worker);
+	init_llist_head(&gc->free_list);
 }
 
-int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
+int i915_gem_init_contexts(struct drm_i915_private *i915)
 {
 	struct i915_gem_context *ctx;
 
 	/* Reassure ourselves we are only called once */
-	GEM_BUG_ON(dev_priv->kernel_context);
+	GEM_BUG_ON(i915->kernel_context);
 
-	init_contexts(dev_priv);
+	init_contexts(&i915->gem.contexts);
 
 	/* lowest priority; idle task */
-	ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
+	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
 	if (IS_ERR(ctx)) {
 		DRM_ERROR("Failed to create default global context\n");
 		return PTR_ERR(ctx);
 	}
-	dev_priv->kernel_context = ctx;
+	i915->kernel_context = ctx;
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
-			 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+			 DRIVER_CAPS(i915)->has_logical_contexts ?
 			 "logical" : "fake");
 	return 0;
 }
 
-void i915_gem_contexts_fini(struct drm_i915_private *i915)
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
 {
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
 	destroy_kernel_context(&i915->kernel_context);
 }
@@ -551,11 +543,16 @@ static int vm_idr_cleanup(int id, void *p, void *data)
 static int gem_context_register(struct i915_gem_context *ctx,
 				struct drm_i915_file_private *fpriv)
 {
+	struct i915_address_space *vm;
 	int ret;
 
 	ctx->file_priv = fpriv;
-	if (ctx->vm)
-		ctx->vm->file = fpriv;
+
+	mutex_lock(&ctx->mutex);
+	vm = i915_gem_context_vm(ctx);
+	if (vm)
+		WRITE_ONCE(vm->file, fpriv); /* XXX */
+	mutex_unlock(&ctx->mutex);
 
 	ctx->pid = get_task_pid(current, PIDTYPE_PID);
 	ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
@@ -592,9 +589,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	idr_init(&file_priv->context_idr);
 	idr_init_base(&file_priv->vm_idr, 1);
 
-	mutex_lock(&i915->drm.struct_mutex);
 	ctx = i915_gem_create_context(i915, 0);
-	mutex_unlock(&i915->drm.struct_mutex);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
 		goto err;
@@ -622,6 +617,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 void i915_gem_context_close(struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_private *i915 = file_priv->dev_priv;
 
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
@@ -630,6 +626,8 @@ void i915_gem_context_close(struct drm_file *file)
 	idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
 	idr_destroy(&file_priv->vm_idr);
 	mutex_destroy(&file_priv->vm_idr_lock);
+
+	contexts_flush_free(&i915->gem.contexts);
 }
 
 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -808,16 +806,12 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
 	struct i915_address_space *vm;
 	int ret;
 
-	if (!ctx->vm)
+	if (!rcu_access_pointer(ctx->vm))
 		return -ENODEV;
 
-	/* XXX rcu acquire? */
-	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
+	rcu_read_lock();
 	vm = i915_vm_get(ctx->vm);
-	mutex_unlock(&ctx->i915->drm.struct_mutex);
+	rcu_read_unlock();
 
 	ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
 	if (ret)
@@ -926,7 +920,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 	if (args->size)
 		return -EINVAL;
 
-	if (!ctx->vm)
+	if (!rcu_access_pointer(ctx->vm))
 		return -ENODEV;
 
 	if (upper_32_bits(args->value))
@@ -940,17 +934,20 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 	if (!vm)
 		return -ENOENT;
 
-	err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+	err = mutex_lock_interruptible(&ctx->mutex);
 	if (err)
 		goto out;
 
-	if (vm == ctx->vm)
+	if (i915_gem_context_is_closed(ctx)) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	if (vm == rcu_access_pointer(ctx->vm))
 		goto unlock;
 
 	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
-	mutex_lock(&ctx->mutex);
 	lut_close(ctx);
-	mutex_unlock(&ctx->mutex);
 
 	old = __set_ppgtt(ctx, vm);
@@ -970,8 +967,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 	}
 
 unlock:
-	mutex_unlock(&ctx->i915->drm.struct_mutex);
-
+	mutex_unlock(&ctx->mutex);
 out:
 	i915_vm_put(vm);
 	return err;
@@ -1827,10 +1823,11 @@ static int clone_vm(struct i915_gem_context *dst,
 		    struct i915_gem_context *src)
 {
 	struct i915_address_space *vm;
+	int err = 0;
 
 	rcu_read_lock();
 	do {
-		vm = READ_ONCE(src->vm);
+		vm = rcu_dereference(src->vm);
 		if (!vm)
 			break;
@@ -1852,7 +1849,7 @@ static int clone_vm(struct i915_gem_context *dst,
 		 * it cannot be reallocated elsewhere.
 		 */
 
-		if (vm == READ_ONCE(src->vm))
+		if (vm == rcu_access_pointer(src->vm))
 			break;
 
 		i915_vm_put(vm);
@@ -1860,11 +1857,16 @@ static int clone_vm(struct i915_gem_context *dst,
 	rcu_read_unlock();
 
 	if (vm) {
-		__assign_ppgtt(dst, vm);
+		if (!mutex_lock_interruptible(&dst->mutex)) {
+			__assign_ppgtt(dst, vm);
+			mutex_unlock(&dst->mutex);
+		} else {
+			err = -EINTR;
+		}
 		i915_vm_put(vm);
 	}
 
-	return 0;
+	return err;
 }
 
 static int create_clone(struct i915_user_extension __user *ext, void *data)
@@ -1954,12 +1956,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 		return -EIO;
 	}
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
 	ext_data.ctx = i915_gem_create_context(i915, args->flags);
-	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(ext_data.ctx))
 		return PTR_ERR(ext_data.ctx);
@@ -2086,10 +2083,12 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 	case I915_CONTEXT_PARAM_GTT_SIZE:
 		args->size = 0;
-		if (ctx->vm)
-			args->value = ctx->vm->total;
+		rcu_read_lock();
+		if (rcu_access_pointer(ctx->vm))
+			args->value = rcu_dereference(ctx->vm)->total;
 		else
 			args->value = to_i915(dev)->ggtt.vm.total;
+		rcu_read_unlock();
 		break;
 
 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
@@ -2155,7 +2154,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
 				       void *data, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_reset_stats *args = data;
 	struct i915_gem_context *ctx;
 	int ret;
@@ -2177,7 +2176,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
 	 */
 	if (capable(CAP_SYS_ADMIN))
-		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+		args->reset_count = i915_reset_count(&i915->gpu_error);
 	else
 		args->reset_count = 0;


@@ -11,7 +11,9 @@
 #include "gt/intel_context.h"
 
+#include "i915_drv.h"
 #include "i915_gem.h"
+#include "i915_gem_gtt.h"
 #include "i915_scheduler.h"
 #include "intel_device_info.h"
@@ -118,8 +120,8 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 }
 
 /* i915_gem_context.c */
-int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
 			  struct drm_file *file);
@@ -158,6 +160,27 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
 	kref_put(&ctx->ref, i915_gem_context_release);
 }
 
+static inline struct i915_address_space *
+i915_gem_context_vm(struct i915_gem_context *ctx)
+{
+	return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
+}
+
+static inline struct i915_address_space *
+i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
+{
+	struct i915_address_space *vm;
+
+	rcu_read_lock();
+	vm = rcu_dereference(ctx->vm);
+	if (!vm)
+		vm = &ctx->i915->ggtt.vm;
+	vm = i915_vm_get(vm);
+	rcu_read_unlock();
+
+	return vm;
+}
+
 static inline struct i915_gem_engines *
 i915_gem_context_engines(struct i915_gem_context *ctx)
 {
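
The two inline helpers added above encode the locking rule from v2 of the
commit message (ctx->vm is RCU-protected; writes are serialised by
ctx->mutex). A hedged usage sketch, not taken from the patch; the function
name and the written field are illustrative only:

	static void example_vm_access(struct i915_gem_context *ctx)
	{
		struct i915_address_space *vm;

		/* Outside ctx->mutex: take a full reference under RCU;
		 * falls back to the global GTT if the context has no ppgtt. */
		vm = i915_gem_context_get_vm_rcu(ctx);
		/* ... use vm ... */
		i915_vm_put(vm);

		/* Holding ctx->mutex: plain, lockdep-checked dereference. */
		mutex_lock(&ctx->mutex);
		vm = i915_gem_context_vm(ctx);
		if (vm)
			WRITE_ONCE(vm->file, NULL); /* cf. gem_context_register() */
		mutex_unlock(&ctx->mutex);
	}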


@@ -88,7 +88,7 @@ struct i915_gem_context {
 	 * In other modes, this is a NULL pointer with the expectation that
 	 * the caller uses the shared global GTT.
 	 */
-	struct i915_address_space *vm;
+	struct i915_address_space __rcu *vm;
 
 	/**
 	 * @pid: process id of creator


@@ -728,7 +728,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
 		return -ENOENT;
 
 	eb->gem_context = ctx;
-	if (ctx->vm)
+	if (rcu_access_pointer(ctx->vm))
 		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
 
 	eb->context_flags = 0;


@@ -766,7 +766,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		 * On almost all of the older hw, we cannot tell the GPU that
 		 * a page is readonly.
 		 */
-		vm = dev_priv->kernel_context->vm;
+		vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
+					       true); /* static vm */
 		if (!vm || !vm->has_read_only)
 			return -ENODEV;
 	}


@@ -1322,15 +1322,15 @@ static int igt_ppgtt_pin_update(void *arg)
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *dev_priv = ctx->i915;
 	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
-	struct i915_address_space *vm = ctx->vm;
 	struct drm_i915_gem_object *obj;
 	struct i915_gem_engines_iter it;
+	struct i915_address_space *vm;
 	struct intel_context *ce;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
 	unsigned int n;
 	int first, last;
-	int err;
+	int err = 0;
 
 	/*
 	 * Make sure there's no funny business when doing a PIN_UPDATE -- in the
@@ -1340,9 +1340,10 @@ static int igt_ppgtt_pin_update(void *arg)
 	 * huge-gtt-pages.
 	 */
 
-	if (!vm || !i915_vm_is_4lvl(vm)) {
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	if (!i915_vm_is_4lvl(vm)) {
 		pr_info("48b PPGTT not supported, skipping\n");
-		return 0;
+		goto out_vm;
 	}
 
 	first = ilog2(I915_GTT_PAGE_SIZE_64K);
@@ -1451,6 +1452,8 @@ static int igt_ppgtt_pin_update(void *arg)
 	i915_vma_close(vma);
 out_put:
 	i915_gem_object_put(obj);
+out_vm:
+	i915_vm_put(vm);
+
 	return err;
 }
@@ -1460,7 +1463,7 @@ static int igt_tmpfs_fallback(void *arg)
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
 	struct vfsmount *gemfs = i915->mm.gemfs;
-	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	u32 *vaddr;
@@ -1510,6 +1513,7 @@ static int igt_tmpfs_fallback(void *arg)
 out_restore:
 	i915->mm.gemfs = gemfs;
 
+	i915_vm_put(vm);
 	return err;
 }
@@ -1517,14 +1521,14 @@ static int igt_shrink_thp(void *arg)
 {
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
-	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
 	struct drm_i915_gem_object *obj;
 	struct i915_gem_engines_iter it;
 	struct intel_context *ce;
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER;
 	unsigned int n;
-	int err;
+	int err = 0;
 
 	/*
 	 * Sanity check shrinking huge-paged object -- make sure nothing blows
@@ -1533,12 +1537,14 @@ static int igt_shrink_thp(void *arg)
 	if (!igt_can_allocate_thp(i915)) {
 		pr_info("missing THP support, skipping\n");
-		return 0;
+		goto out_vm;
 	}
 
 	obj = i915_gem_object_create_shmem(i915, SZ_2M);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_vm;
+	}
 
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
@@ -1607,6 +1613,8 @@ static int igt_shrink_thp(void *arg)
 	i915_vma_close(vma);
 out_put:
 	i915_gem_object_put(obj);
+out_vm:
+	i915_vm_put(vm);
+
 	return err;
 }
@@ -1675,6 +1683,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 	};
 	struct drm_file *file;
 	struct i915_gem_context *ctx;
+	struct i915_address_space *vm;
 	intel_wakeref_t wakeref;
 	int err;
@@ -1699,8 +1708,11 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 		goto out_unlock;
 	}
 
-	if (ctx->vm)
-		ctx->vm->scrub_64K = true;
+	mutex_lock(&ctx->mutex);
+	vm = i915_gem_context_vm(ctx);
+	if (vm)
+		WRITE_ONCE(vm->scrub_64K, true);
+	mutex_unlock(&ctx->mutex);
 
 	err = i915_subtests(tests, ctx);


@@ -53,19 +53,17 @@ static int live_nop_switch(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx) {
 		err = -ENOMEM;
-		goto out_unlock;
+		goto out_file;
 	}
 
 	for (n = 0; n < nctx; n++) {
 		ctx[n] = live_context(i915, file);
 		if (IS_ERR(ctx[n])) {
 			err = PTR_ERR(ctx[n]);
-			goto out_unlock;
+			goto out_file;
 		}
 	}
@@ -79,7 +77,7 @@ static int live_nop_switch(void *arg)
 		rq = igt_request_alloc(ctx[n], engine);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			goto out_unlock;
+			goto out_file;
 		}
 		i915_request_add(rq);
 	}
@@ -87,7 +85,7 @@ static int live_nop_switch(void *arg)
 		pr_err("Failed to populated %d contexts\n", nctx);
 		intel_gt_set_wedged(&i915->gt);
 		err = -EIO;
-		goto out_unlock;
+		goto out_file;
 	}
 
 	times[1] = ktime_get_raw();
@@ -97,7 +95,7 @@ static int live_nop_switch(void *arg)
 		err = igt_live_test_begin(&t, i915, __func__, engine->name);
 		if (err)
-			goto out_unlock;
+			goto out_file;
 
 		end_time = jiffies + i915_selftest.timeout_jiffies;
 		for_each_prime_number_from(prime, 2, 8192) {
@@ -107,7 +105,7 @@ static int live_nop_switch(void *arg)
 				rq = igt_request_alloc(ctx[n % nctx], engine);
 				if (IS_ERR(rq)) {
 					err = PTR_ERR(rq);
-					goto out_unlock;
+					goto out_file;
 				}
 
 				/*
@@ -143,7 +141,7 @@ static int live_nop_switch(void *arg)
 		err = igt_live_test_end(&t);
 		if (err)
-			goto out_unlock;
+			goto out_file;
 
 		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
 			engine->name,
@@ -151,8 +149,7 @@ static int live_nop_switch(void *arg)
 			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
 	}
 
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
+out_file:
 	mock_file_free(i915, file);
 	return err;
 }
@@ -253,12 +250,10 @@ static int live_parallel_switch(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	ctx = live_context(i915, file);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
-		goto out_locked;
+		goto out_file;
 	}
 
 	engines = i915_gem_context_lock_engines(ctx);
@@ -268,7 +263,7 @@ static int live_parallel_switch(void *arg)
 	if (!data) {
 		i915_gem_context_unlock_engines(ctx);
 		err = -ENOMEM;
-		goto out_locked;
+		goto out;
 	}
 
 	m = 0; /* Use the first context as our template for the engines */
@@ -276,7 +271,7 @@ static int live_parallel_switch(void *arg)
 		err = intel_context_pin(ce);
 		if (err) {
 			i915_gem_context_unlock_engines(ctx);
-			goto out_locked;
+			goto out;
 		}
 		data[m++].ce[0] = intel_context_get(ce);
 	}
@@ -287,7 +282,7 @@ static int live_parallel_switch(void *arg)
 		ctx = live_context(i915, file);
 		if (IS_ERR(ctx)) {
 			err = PTR_ERR(ctx);
-			goto out_locked;
+			goto out;
 		}
 
 		for (m = 0; m < count; m++) {
@@ -296,20 +291,18 @@ static int live_parallel_switch(void *arg)
 			ce = intel_context_create(ctx, data[m].ce[0]->engine);
 			if (IS_ERR(ce))
-				goto out_locked;
+				goto out;
 
 			err = intel_context_pin(ce);
 			if (err) {
 				intel_context_put(ce);
-				goto out_locked;
+				goto out;
 			}
 
 			data[m].ce[n] = ce;
 		}
 	}
 
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	for (fn = func; !err && *fn; fn++) {
 		struct igt_live_test t;
 		int n;
@@ -354,8 +347,7 @@ static int live_parallel_switch(void *arg)
 		mutex_unlock(&i915->drm.struct_mutex);
 	}
 
-	mutex_lock(&i915->drm.struct_mutex);
-out_locked:
+out:
 	for (n = 0; n < count; n++) {
 		for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
 			if (!data[n].ce[m])
@@ -365,8 +357,8 @@ static int live_parallel_switch(void *arg)
 			intel_context_put(data[n].ce[m]);
 		}
 	}
-	mutex_unlock(&i915->drm.struct_mutex);
 	kfree(data);
+out_file:
 	mock_file_free(i915, file);
 	return err;
 }
@@ -626,11 +618,9 @@ static int igt_ctx_exec(void *arg)
 		if (IS_ERR(file))
 			return PTR_ERR(file);
 
-		mutex_lock(&i915->drm.struct_mutex);
-
 		err = igt_live_test_begin(&t, i915, __func__, engine->name);
 		if (err)
-			goto out_unlock;
+			goto out_file;
 
 		ncontexts = 0;
 		ndwords = 0;
@@ -642,7 +632,7 @@ static int igt_ctx_exec(void *arg)
 			ctx = kernel_context(i915);
 			if (IS_ERR(ctx)) {
 				err = PTR_ERR(ctx);
-				goto out_unlock;
+				goto out_file;
 			}
 
 			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
@@ -654,7 +644,7 @@ static int igt_ctx_exec(void *arg)
 					err = PTR_ERR(obj);
 					intel_context_put(ce);
 					kernel_context_close(ctx);
-					goto out_unlock;
+					goto out_file;
 				}
 			}
@@ -663,17 +653,18 @@ static int igt_ctx_exec(void *arg)
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
 				       engine->name,
-				       yesno(!!ctx->vm), err);
+				       yesno(!!rcu_access_pointer(ctx->vm)),
+				       err);
 				intel_context_put(ce);
 				kernel_context_close(ctx);
-				goto out_unlock;
+				goto out_file;
 			}
 
 			err = throttle(ce, tq, ARRAY_SIZE(tq));
 			if (err) {
 				intel_context_put(ce);
 				kernel_context_close(ctx);
-				goto out_unlock;
+				goto out_file;
 			}
 
 			if (++dw == max_dwords(obj)) {
@@ -703,11 +694,10 @@ static int igt_ctx_exec(void *arg)
 			dw += rem;
 		}
 
-out_unlock:
+out_file:
 		throttle_release(tq, ARRAY_SIZE(tq));
 		if (igt_live_test_end(&t))
 			err = -EIO;
-		mutex_unlock(&i915->drm.struct_mutex);
 		mock_file_free(i915, file);
 
 		if (err)
@@ -742,22 +732,20 @@ static int igt_shared_ctx_exec(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	parent = live_context(i915, file);
 	if (IS_ERR(parent)) {
 		err = PTR_ERR(parent);
-		goto out_unlock;
+		goto out_file;
 	}
 
 	if (!parent->vm) { /* not full-ppgtt; nothing to share */
 		err = 0;
-		goto out_unlock;
+		goto out_file;
 	}
 
 	err = igt_live_test_begin(&t, i915, __func__, "");
 	if (err)
-		goto out_unlock;
+		goto out_file;
 
 	for_each_engine(engine, i915, id) {
 		unsigned long ncontexts, ndwords, dw;
@@ -781,7 +769,9 @@ static int igt_shared_ctx_exec(void *arg)
 				goto out_test;
 			}
 
+			mutex_lock(&ctx->mutex);
 			__assign_ppgtt(ctx, parent->vm);
+			mutex_unlock(&ctx->mutex);
 
 			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
 			GEM_BUG_ON(IS_ERR(ce));
@@ -801,7 +791,8 @@ static int igt_shared_ctx_exec(void *arg)
 				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
 				       ndwords, dw, max_dwords(obj),
 				       engine->name,
-				       yesno(!!ctx->vm), err);
+				       yesno(!!rcu_access_pointer(ctx->vm)),
+				       err);
 				intel_context_put(ce);
 				kernel_context_close(ctx);
 				goto out_test;
@@ -840,17 +831,13 @@ static int igt_shared_ctx_exec(void *arg)
 			dw += rem;
 		}
 
-		mutex_unlock(&i915->drm.struct_mutex);
 		i915_gem_drain_freed_objects(i915);
-		mutex_lock(&i915->drm.struct_mutex);
 	}
 out_test:
 	throttle_release(tq, ARRAY_SIZE(tq));
 	if (igt_live_test_end(&t))
 		err = -EIO;
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
+out_file:
 	mock_file_free(i915, file);
 	return err;
 }
@@ -1222,8 +1209,6 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 	if (flags & TEST_RESET)
 		igt_global_reset_lock(&i915->gt);
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	ctx = live_context(i915, file);
 	if (IS_ERR(ctx)) {
 		ret = PTR_ERR(ctx);
@@ -1278,8 +1263,6 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 	i915_gem_object_put(obj);
 
 out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	if (flags & TEST_RESET)
 		igt_global_reset_unlock(&i915->gt);
@@ -1339,23 +1322,24 @@ static int igt_ctx_readonly(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	err = igt_live_test_begin(&t, i915, __func__, "");
 	if (err)
-		goto out_unlock;
+		goto out_file;
 
 	ctx = live_context(i915, file);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
-		goto out_unlock;
+		goto out_file;
 	}
 
-	vm = ctx->vm ?: &i915->ggtt.alias->vm;
+	rcu_read_lock();
+	vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
 	if (!vm || !vm->has_read_only) {
+		rcu_read_unlock();
 		err = 0;
-		goto out_unlock;
+		goto out_file;
 	}
+	rcu_read_unlock();
 
 	ndwords = 0;
 	dw = 0;
@@ -1373,7 +1357,7 @@ static int igt_ctx_readonly(void *arg)
 			if (IS_ERR(obj)) {
 				err = PTR_ERR(obj);
 				i915_gem_context_unlock_engines(ctx);
-				goto out_unlock;
+				goto out_file;
 			}
 
 			if (prandom_u32_state(&prng) & 1)
@@ -1384,15 +1368,17 @@ static int igt_ctx_readonly(void *arg)
 		if (err) {
 			pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
 			       ndwords, dw, max_dwords(obj),
-			       ce->engine->name, yesno(!!ctx->vm), err);
+			       ce->engine->name,
+			       yesno(!!rcu_access_pointer(ctx->vm)),
+			       err);
 			i915_gem_context_unlock_engines(ctx);
-			goto out_unlock;
+			goto out_file;
 		}
 
 		err = throttle(ce, tq, ARRAY_SIZE(tq));
 		if (err) {
 			i915_gem_context_unlock_engines(ctx);
-			goto out_unlock;
+			goto out_file;
 		}
 
 		if (++dw == max_dwords(obj)) {
@@ -1424,20 +1410,19 @@ static int igt_ctx_readonly(void *arg)
 		dw += rem;
 	}
 
-out_unlock:
+out_file:
 	throttle_release(tq, ARRAY_SIZE(tq));
 	if (igt_live_test_end(&t))
 		err = -EIO;
-	mutex_unlock(&i915->drm.struct_mutex);
 	mock_file_free(i915, file);
 	return err;
 }
 
-static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+static int check_scratch(struct i915_address_space *vm, u64 offset)
 {
 	struct drm_mm_node *node =
-		__drm_mm_interval_first(&ctx->vm->mm,
+		__drm_mm_interval_first(&vm->mm,
 					offset, offset + sizeof(u32) - 1);
 	if (!node || node->start > offset)
 		return 0;
@@ -1455,6 +1440,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 {
 	struct drm_i915_private *i915 = ctx->i915;
 	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
 	struct i915_request *rq;
 	struct i915_vma *vma;
 	u32 *cmd;
@@ -1487,17 +1473,18 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	intel_gt_chipset_flush(engine->gt);
 
-	vma = i915_vma_instance(obj, ctx->vm, NULL);
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
-		goto err;
+		goto err_vm;
 	}
 
 	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
 	if (err)
-		goto err;
+		goto err_vm;
 
-	err = check_scratch(ctx, offset);
+	err = check_scratch(vm, offset);
 	if (err)
 		goto err_unpin;
@@ -1523,6 +1510,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	i915_request_add(rq);
 
+	i915_vm_put(vm);
 	return 0;
 
 skip_request:
@@ -1531,6 +1519,8 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	i915_request_add(rq);
 err_unpin:
 	i915_vma_unpin(vma);
+err_vm:
+	i915_vm_put(vm);
 err:
 	i915_gem_object_put(obj);
 	return err;
@@ -1542,6 +1532,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 {
 	struct drm_i915_private *i915 = ctx->i915;
 	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
 	const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
 	const u32 result = 0x100;
 	struct i915_request *rq;
@@ -1586,17 +1577,18 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	intel_gt_chipset_flush(engine->gt);
 
-	vma = i915_vma_instance(obj, ctx->vm, NULL);
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
-		goto err;
+		goto err_vm;
 	}
 
 	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
 	if (err)
-		goto err;
+		goto err_vm;
 
-	err = check_scratch(ctx, offset);
+	err = check_scratch(vm, offset);
 	if (err)
 		goto err_unpin;
@@ -1627,12 +1619,12 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	err = i915_gem_object_set_to_cpu_domain(obj, false);
 	i915_gem_object_unlock(obj);
 	if (err)
-		goto err;
+		goto err_vm;
 
 	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
-		goto err;
+		goto err_vm;
 	}
 
 	*value = cmd[result / sizeof(*cmd)];
@@ -1647,6 +1639,8 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	i915_request_add(rq);
 err_unpin:
 	i915_vma_unpin(vma);
+err_vm:
+	i915_vm_put(vm);
 err:
 	i915_gem_object_put(obj);
 	return err;
@@ -1677,27 +1671,25 @@ static int igt_vm_isolation(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	err = igt_live_test_begin(&t, i915, __func__, "");
 	if (err)
-		goto out_unlock;
+		goto out_file;
 
 	ctx_a = live_context(i915, file);
 	if (IS_ERR(ctx_a)) {
 		err = PTR_ERR(ctx_a);
-		goto out_unlock;
+		goto out_file;
 	}
 
 	ctx_b = live_context(i915, file);
 	if (IS_ERR(ctx_b)) {
 		err = PTR_ERR(ctx_b);
-		goto out_unlock;
+		goto out_file;
 	}
 
 	/* We can only test vm isolation, if the vm are distinct */
 	if (ctx_a->vm == ctx_b->vm)
-		goto out_unlock;
+		goto out_file;
 
 	vm_total = ctx_a->vm->total;
 	GEM_BUG_ON(ctx_b->vm->total != vm_total);
@@ -1726,7 +1718,7 @@ static int igt_vm_isolation(void *arg)
 			err = read_from_scratch(ctx_b, engine,
 						offset, &value);
 			if (err)
-				goto out_unlock;
+				goto out_file;
 
 			if (value) {
 				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@@ -1735,7 +1727,7 @@ static int igt_vm_isolation(void *arg)
 				       lower_32_bits(offset),
 				       this);
 				err = -EINVAL;
-				goto out_unlock;
+				goto out_file;
 			}
 
 			this++;
@@ -1745,11 +1737,9 @@ static int igt_vm_isolation(void *arg)
 	pr_info("Checked %lu scratch offsets across %d engines\n",
 		count, RUNTIME_INFO(i915)->num_engines);
 
-out_unlock:
+out_file:
 	if (igt_live_test_end(&t))
 		err = -EIO;
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	mock_file_free(i915, file);
 	return err;
 }
@@ -1781,13 +1771,9 @@ static int mock_context_barrier(void *arg)
 	 * a request; useful for retiring old state after loading new.
 	 */
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	ctx = mock_context(i915, "mock");
-	if (!ctx) {
-		err = -ENOMEM;
-		goto unlock;
-	}
+	if (!ctx)
+		return -ENOMEM;
 
 	counter = 0;
 	err = context_barrier_task(ctx, 0,
@@ -1860,8 +1846,6 @@ static int mock_context_barrier(void *arg)
 out:
 	mock_context_close(ctx);
-unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 #undef pr_fmt
 #define pr_fmt(x) x


@@ -42,7 +42,10 @@ mock_context(struct drm_i915_private *i915,
 		if (!ppgtt)
 			goto err_put;
 
+		mutex_lock(&ctx->mutex);
 		__set_ppgtt(ctx, &ppgtt->vm);
+		mutex_unlock(&ctx->mutex);
+
 		i915_vm_put(&ppgtt->vm);
 	}
 
@@ -65,7 +68,7 @@ void mock_context_close(struct i915_gem_context *ctx)
 
 void mock_init_contexts(struct drm_i915_private *i915)
 {
-	init_contexts(i915);
+	init_contexts(&i915->gem.contexts);
 }
 
 struct i915_gem_context *
@@ -74,8 +77,6 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
 	struct i915_gem_context *ctx;
 	int err;
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
-
 	ctx = i915_gem_create_context(i915, 0);
 	if (IS_ERR(ctx))
 		return ctx;


@@ -221,12 +221,20 @@ intel_context_init(struct intel_context *ce,
 		   struct i915_gem_context *ctx,
 		   struct intel_engine_cs *engine)
 {
+	struct i915_address_space *vm;
+
 	GEM_BUG_ON(!engine->cops);
 
 	kref_init(&ce->ref);
 
 	ce->gem_context = ctx;
-	ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+	rcu_read_lock();
+	vm = rcu_dereference(ctx->vm);
+	if (vm)
+		ce->vm = i915_vm_get(vm);
+	else
+		ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
+	rcu_read_unlock();
+
 	if (ctx->timeline)
 		ce->timeline = intel_timeline_get(ctx->timeline);


@@ -155,13 +155,9 @@ static int live_context_size(void *arg)
 	 * HW tries to write past the end of one.
 	 */
 
-	mutex_lock(&gt->i915->drm.struct_mutex);
-
 	fixme = kernel_context(gt->i915);
-	if (IS_ERR(fixme)) {
-		err = PTR_ERR(fixme);
-		goto unlock;
-	}
+	if (IS_ERR(fixme))
+		return PTR_ERR(fixme);
 
 	for_each_engine(engine, gt->i915, id) {
 		struct {
@@ -201,8 +197,6 @@ static int live_context_size(void *arg)
 	}
 
 	kernel_context_close(fixme);
-unlock:
-	mutex_unlock(&gt->i915->drm.struct_mutex);
 	return err;
 }
@@ -305,12 +299,10 @@ static int live_active_context(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&gt->i915->drm.struct_mutex);
-
 	fixme = live_context(gt->i915, file);
 	if (IS_ERR(fixme)) {
 		err = PTR_ERR(fixme);
-		goto unlock;
+		goto out_file;
 	}
 
 	for_each_engine(engine, gt->i915, id) {
@@ -323,8 +315,7 @@ static int live_active_context(void *arg)
 			break;
 	}
 
-unlock:
-	mutex_unlock(&gt->i915->drm.struct_mutex);
+out_file:
 	mock_file_free(gt->i915, file);
 	return err;
 }
@@ -418,12 +409,10 @@ static int live_remote_context(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&gt->i915->drm.struct_mutex);
-
 	fixme = live_context(gt->i915, file);
 	if (IS_ERR(fixme)) {
 		err = PTR_ERR(fixme);
-		goto unlock;
+		goto out_file;
 	}
 
 	for_each_engine(engine, gt->i915, id) {
@@ -436,8 +425,7 @@ static int live_remote_context(void *arg)
 			break;
 	}
 
-unlock:
-	mutex_unlock(&gt->i915->drm.struct_mutex);
+out_file:
 	mock_file_free(gt->i915, file);
 	return err;
 }


@@ -58,9 +58,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
 	memset(h, 0, sizeof(*h));
 	h->gt = gt;
 
-	mutex_lock(&gt->i915->drm.struct_mutex);
 	h->ctx = kernel_context(gt->i915);
-	mutex_unlock(&gt->i915->drm.struct_mutex);
 	if (IS_ERR(h->ctx))
 		return PTR_ERR(h->ctx);
@@ -133,7 +131,7 @@ static struct i915_request *
 hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
 	struct intel_gt *gt = h->gt;
-	struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
+	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
 	struct drm_i915_gem_object *obj;
 	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
@@ -143,12 +141,15 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 	int err;
 
 	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
+	if (IS_ERR(obj)) {
+		i915_vm_put(vm);
 		return ERR_CAST(obj);
+	}
 
 	vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
 	if (IS_ERR(vaddr)) {
 		i915_gem_object_put(obj);
+		i915_vm_put(vm);
 		return ERR_CAST(vaddr);
 	}
@@ -159,16 +160,22 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 	h->batch = vaddr;
 
 	vma = i915_vma_instance(h->obj, vm, NULL);
-	if (IS_ERR(vma))
+	if (IS_ERR(vma)) {
+		i915_vm_put(vm);
 		return ERR_CAST(vma);
+	}
 
 	hws = i915_vma_instance(h->hws, vm, NULL);
-	if (IS_ERR(hws))
+	if (IS_ERR(hws)) {
+		i915_vm_put(vm);
 		return ERR_CAST(hws);
+	}
 
 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
+	if (err) {
+		i915_vm_put(vm);
 		return ERR_PTR(err);
+	}
 
 	err = i915_vma_pin(hws, 0, 0, PIN_USER);
 	if (err)
@@ -266,6 +273,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
+	i915_vm_put(vm);
 	return err ? ERR_PTR(err) : rq;
 }
@@ -382,9 +390,7 @@ static int igt_reset_nop(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&gt->i915->drm.struct_mutex);
 	ctx = live_context(gt->i915, file);
-	mutex_unlock(&gt->i915->drm.struct_mutex);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
 		goto out;
@@ -458,9 +464,7 @@ static int igt_reset_nop_engine(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&gt->i915->drm.struct_mutex);
 	ctx = live_context(gt->i915, file);
-	mutex_unlock(&gt->i915->drm.struct_mutex);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
 		goto out;
@@ -705,9 +709,7 @@ static int active_engine(void *data)
 		return PTR_ERR(file);
 
 	for (count = 0; count < ARRAY_SIZE(ctx); count++) {
-		mutex_lock(&engine->i915->drm.struct_mutex);
 		ctx[count] = live_context(engine->i915, file);
-		mutex_unlock(&engine->i915->drm.struct_mutex);
 		if (IS_ERR(ctx[count])) {
 			err = PTR_ERR(ctx[count]);
 			while (--count)
@@ -1291,6 +1293,7 @@ static int igt_reset_evict_ppgtt(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct i915_gem_context *ctx;
+	struct i915_address_space *vm;
 	struct drm_file *file;
 	int err;
@@ -1298,18 +1301,20 @@ static int igt_reset_evict_ppgtt(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&gt->i915->drm.struct_mutex);
 	ctx = live_context(gt->i915, file);
-	mutex_unlock(&gt->i915->drm.struct_mutex);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
 		goto out;
 	}
 
 	err = 0;
-	if (ctx->vm) /* aliasing == global gtt locking, covered above */
-		err = __igt_reset_evict_vma(gt, ctx->vm,
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	if (!i915_is_ggtt(vm)) {
+		/* aliasing == global gtt locking, covered above */
+		err = __igt_reset_evict_vma(gt, vm,
 					    evict_vma, EXEC_OBJECT_WRITE);
+	}
+	i915_vm_put(vm);
 
 out:
 	mock_file_free(gt->i915, file);


@@ -1631,7 +1631,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
 	int err = 0;
 
 	if (batch) {
-		vma = i915_vma_instance(batch, ctx->vm, NULL);
+		struct i915_address_space *vm;
+
+		vm = i915_gem_context_get_vm_rcu(ctx);
+		vma = i915_vma_instance(batch, vm, NULL);
+		i915_vm_put(vm);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);


@@ -260,7 +260,6 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
 	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
 
 	intel_context_put(ce);
-	kernel_context_close(ctx);
 
 	if (IS_ERR(rq)) {
 		spin = NULL;
@@ -279,6 +278,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
 	if (err && spin)
 		igt_spinner_end(spin);
 
+	kernel_context_close(ctx);
 	return err;
 }
@@ -355,6 +355,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 static struct i915_vma *create_batch(struct i915_gem_context *ctx)
 {
 	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
 	struct i915_vma *vma;
 	int err;
@@ -362,7 +363,9 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	vma = i915_vma_instance(obj, ctx->vm, NULL);
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	vma = i915_vma_instance(obj, vm, NULL);
+	i915_vm_put(vm);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err_obj;
@@ -463,12 +466,15 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 		0xffff00ff,
 		0xffffffff,
 	};
+	struct i915_address_space *vm;
 	struct i915_vma *scratch;
 	struct i915_vma *batch;
 	int err = 0, i, v;
 	u32 *cs, *results;
 
-	scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
+	i915_vm_put(vm);
 	if (IS_ERR(scratch))
 		return PTR_ERR(scratch);
@@ -1010,6 +1016,7 @@ static int live_isolated_whitelist(void *arg)
 		return 0;
 
 	for (i = 0; i < ARRAY_SIZE(client); i++) {
+		struct i915_address_space *vm;
 		struct i915_gem_context *c;
 
 		c = kernel_context(i915);
@@ -1018,22 +1025,27 @@ static int live_isolated_whitelist(void *arg)
 			goto err;
 		}
 
-		client[i].scratch[0] = create_scratch(c->vm, 1024);
+		vm = i915_gem_context_get_vm_rcu(c);
+
+		client[i].scratch[0] = create_scratch(vm, 1024);
 		if (IS_ERR(client[i].scratch[0])) {
 			err = PTR_ERR(client[i].scratch[0]);
+			i915_vm_put(vm);
 			kernel_context_close(c);
 			goto err;
 		}
 
-		client[i].scratch[1] = create_scratch(c->vm, 1024);
+		client[i].scratch[1] = create_scratch(vm, 1024);
 		if (IS_ERR(client[i].scratch[1])) {
 			err = PTR_ERR(client[i].scratch[1]);
 			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+			i915_vm_put(vm);
 			kernel_context_close(c);
 			goto err;
 		}
 
 		client[i].ctx = c;
+		i915_vm_put(vm);
 	}
 
 	for_each_engine(engine, i915, id) {


@ -365,7 +365,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
struct i915_gem_context *ctx) struct i915_gem_context *ctx)
{ {
struct intel_vgpu_mm *mm = workload->shadow_mm; struct intel_vgpu_mm *mm = workload->shadow_mm;
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); struct i915_ppgtt *ppgtt =
i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
int i = 0; int i = 0;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@ -378,6 +379,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
} }
} }
i915_vm_put(&ppgtt->vm);
} }
static int static int
@ -1213,20 +1216,18 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
struct i915_ppgtt *ppgtt;
enum intel_engine_id i; enum intel_engine_id i;
int ret; int ret;
mutex_lock(&i915->drm.struct_mutex);
ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX); ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
if (IS_ERR(ctx)) { if (IS_ERR(ctx))
ret = PTR_ERR(ctx); return PTR_ERR(ctx);
goto out_unlock;
}
i915_gem_context_set_force_single_submission(ctx); i915_gem_context_set_force_single_submission(ctx);
i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm)); ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
i915_context_ppgtt_root_save(s, ppgtt);
for_each_engine(engine, i915, i) { for_each_engine(engine, i915, i) {
struct intel_context *ce; struct intel_context *ce;
@ -1271,12 +1272,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
atomic_set(&s->running_workload_num, 0); atomic_set(&s->running_workload_num, 0);
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES); bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
i915_vm_put(&ppgtt->vm);
i915_gem_context_put(ctx); i915_gem_context_put(ctx);
mutex_unlock(&i915->drm.struct_mutex);
return 0; return 0;
out_shadow_ctx: out_shadow_ctx:
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm)); i915_context_ppgtt_root_restore(s, ppgtt);
for_each_engine(engine, i915, i) { for_each_engine(engine, i915, i) {
if (IS_ERR(s->shadow[i])) if (IS_ERR(s->shadow[i]))
break; break;
@ -1284,9 +1285,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
intel_context_unpin(s->shadow[i]); intel_context_unpin(s->shadow[i]);
intel_context_put(s->shadow[i]); intel_context_put(s->shadow[i]);
} }
i915_vm_put(&ppgtt->vm);
i915_gem_context_put(ctx); i915_gem_context_put(ctx);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret; return ret;
} }
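The vGPU path follows suit: instead of holding struct_mutex across the whole setup, it pins the context's ppgtt once and releases it on every exit. Condensed shape of the function after the hunks above (setup_shadow_engines() is a hypothetical stand-in for the for_each_engine() loop):

	ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
	i915_context_ppgtt_root_save(s, ppgtt);

	ret = setup_shadow_engines(s);		/* hypothetical helper */
	if (ret)
		i915_context_ppgtt_root_restore(s, ppgtt);

	i915_vm_put(&ppgtt->vm);		/* balanced on success and failure */
	i915_gem_context_put(ctx);
	return ret;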


@ -316,12 +316,18 @@ static void print_context_stats(struct seq_file *m,
struct drm_i915_private *i915) struct drm_i915_private *i915)
{ {
struct file_stats kstats = {}; struct file_stats kstats = {};
struct i915_gem_context *ctx; struct i915_gem_context *ctx, *cn;
list_for_each_entry(ctx, &i915->contexts.list, link) { spin_lock(&i915->gem.contexts.lock);
list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it; struct i915_gem_engines_iter it;
struct intel_context *ce; struct intel_context *ce;
if (!kref_get_unless_zero(&ctx->ref))
continue;
spin_unlock(&i915->gem.contexts.lock);
for_each_gem_engine(ce, for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) { i915_gem_context_lock_engines(ctx), it) {
intel_context_lock_pinned(ce); intel_context_lock_pinned(ce);
@ -338,7 +344,9 @@ static void print_context_stats(struct seq_file *m,
i915_gem_context_unlock_engines(ctx); i915_gem_context_unlock_engines(ctx);
if (!IS_ERR_OR_NULL(ctx->file_priv)) { if (!IS_ERR_OR_NULL(ctx->file_priv)) {
struct file_stats stats = { .vm = ctx->vm, }; struct file_stats stats = {
.vm = rcu_access_pointer(ctx->vm),
};
struct drm_file *file = ctx->file_priv->file; struct drm_file *file = ctx->file_priv->file;
struct task_struct *task; struct task_struct *task;
char name[80]; char name[80];
@ -355,7 +363,12 @@ static void print_context_stats(struct seq_file *m,
print_file_stats(m, name, stats); print_file_stats(m, name, stats);
} }
spin_lock(&i915->gem.contexts.lock);
list_safe_reset_next(ctx, cn, link);
i915_gem_context_put(ctx);
} }
spin_unlock(&i915->gem.contexts.lock);
print_file_stats(m, "[k]contexts", kstats); print_file_stats(m, "[k]contexts", kstats);
} }
@ -363,7 +376,6 @@ static void print_context_stats(struct seq_file *m,
static int i915_gem_object_info(struct seq_file *m, void *data) static int i915_gem_object_info(struct seq_file *m, void *data)
{ {
struct drm_i915_private *i915 = node_to_i915(m->private); struct drm_i915_private *i915 = node_to_i915(m->private);
int ret;
seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n", seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
i915->mm.shrink_count, i915->mm.shrink_count,
@ -372,12 +384,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_putc(m, '\n'); seq_putc(m, '\n');
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
print_context_stats(m, i915); print_context_stats(m, i915);
mutex_unlock(&i915->drm.struct_mutex);
return 0; return 0;
} }
@ -1579,19 +1586,19 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
static int i915_context_status(struct seq_file *m, void *unused) static int i915_context_status(struct seq_file *m, void *unused)
{ {
struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_i915_private *i915 = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm; struct i915_gem_context *ctx, *cn;
struct i915_gem_context *ctx;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex); spin_lock(&i915->gem.contexts.lock);
if (ret) list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
return ret;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
struct i915_gem_engines_iter it; struct i915_gem_engines_iter it;
struct intel_context *ce; struct intel_context *ce;
if (!kref_get_unless_zero(&ctx->ref))
continue;
spin_unlock(&i915->gem.contexts.lock);
seq_puts(m, "HW context "); seq_puts(m, "HW context ");
if (ctx->pid) { if (ctx->pid) {
struct task_struct *task; struct task_struct *task;
@ -1626,9 +1633,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
i915_gem_context_unlock_engines(ctx); i915_gem_context_unlock_engines(ctx);
seq_putc(m, '\n'); seq_putc(m, '\n');
}
mutex_unlock(&dev->struct_mutex); spin_lock(&i915->gem.contexts.lock);
list_safe_reset_next(ctx, cn, link);
i915_gem_context_put(ctx);
}
spin_unlock(&i915->gem.contexts.lock);
return 0; return 0;
} }
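Both debugfs walkers above use the same idiom, which recurs in i915_perf further down: hold the new contexts spinlock only to step the list, pin each context with kref_get_unless_zero() so it cannot be freed once the lock is dropped, and revalidate the cursor with list_safe_reset_next() after retaking the lock, since the cached next element may have been unlinked in the meantime. The skeleton (a sketch; process() stands in for the per-context body and may sleep):

	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		if (!kref_get_unless_zero(&ctx->ref))
			continue;	/* already on its way out */
		spin_unlock(&i915->gem.contexts.lock);

		process(ctx);		/* unlocked: the list may mutate under us */

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);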


@ -1702,10 +1702,8 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{ {
struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_file_private *file_priv = file->driver_priv;
mutex_lock(&dev->struct_mutex);
i915_gem_context_close(file); i915_gem_context_close(file);
i915_gem_release(dev, file); i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
kfree_rcu(file_priv, rcu); kfree_rcu(file_priv, rcu);


@ -1536,13 +1536,6 @@ struct drm_i915_private {
int audio_power_refcount; int audio_power_refcount;
u32 audio_freq_cntrl; u32 audio_freq_cntrl;
struct {
struct mutex mutex;
struct list_head list;
struct llist_head free_list;
struct work_struct free_work;
} contexts;
u32 fdi_rx_config; u32 fdi_rx_config;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
@ -1698,6 +1691,14 @@ struct drm_i915_private {
struct { struct {
struct notifier_block pm_notifier; struct notifier_block pm_notifier;
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
struct list_head list;
struct llist_head free_list;
struct work_struct free_work;
} contexts;
} gem; } gem;
/* For i945gm vblank irq vs. C3 workaround */ /* For i945gm vblank irq vs. C3 workaround */
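The relocated struct needs the matching one-time setup; a plausible sketch of what i915_gem_init_contexts() (called from i915_gem_init() below) sets up, reusing contexts_free_worker from the core of this patch:

	static void init_contexts(struct i915_gem_contexts *gc)
	{
		spin_lock_init(&gc->lock);
		INIT_LIST_HEAD(&gc->list);

		INIT_WORK(&gc->free_work, contexts_free_worker);
		init_llist_head(&gc->free_list);
	}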


@ -1266,7 +1266,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock; goto err_unlock;
} }
ret = i915_gem_contexts_init(dev_priv); ret = i915_gem_init_contexts(dev_priv);
if (ret) { if (ret) {
GEM_BUG_ON(ret == -EIO); GEM_BUG_ON(ret == -EIO);
goto err_scratch; goto err_scratch;
@ -1348,7 +1348,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
} }
err_context: err_context:
if (ret != -EIO) if (ret != -EIO)
i915_gem_contexts_fini(dev_priv); i915_gem_driver_release__contexts(dev_priv);
err_scratch: err_scratch:
intel_gt_driver_release(&dev_priv->gt); intel_gt_driver_release(&dev_priv->gt);
err_unlock: err_unlock:
@ -1416,11 +1416,9 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
void i915_gem_driver_release(struct drm_i915_private *dev_priv) void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{ {
mutex_lock(&dev_priv->drm.struct_mutex);
intel_engines_cleanup(dev_priv); intel_engines_cleanup(dev_priv);
i915_gem_contexts_fini(dev_priv); i915_gem_driver_release__contexts(dev_priv);
intel_gt_driver_release(&dev_priv->gt); intel_gt_driver_release(&dev_priv->gt);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_wa_list_free(&dev_priv->gt_wa_list); intel_wa_list_free(&dev_priv->gt_wa_list);
@ -1430,7 +1428,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv); i915_gem_drain_freed_objects(dev_priv);
WARN_ON(!list_empty(&dev_priv->contexts.list)); WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
} }
void i915_gem_init_mmio(struct drm_i915_private *i915) void i915_gem_init_mmio(struct drm_i915_private *i915)
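Release-side, the rename from i915_gem_contexts_fini() is also a contract change: no struct_mutex required, the function only has to ensure the deferred free worker has drained before the list-empty assertion above fires. A plausible one-liner sketch (the body is not shown in this excerpt):

	void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
	{
		flush_work(&i915->gem.contexts.free_work);
	}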


@ -1366,7 +1366,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (vm->has_read_only && if (vm->has_read_only &&
vm->i915->kernel_context && vm->i915->kernel_context &&
vm->i915->kernel_context->vm) { vm->i915->kernel_context->vm) {
struct i915_address_space *clone = vm->i915->kernel_context->vm; struct i915_address_space *clone =
rcu_dereference_protected(vm->i915->kernel_context->vm,
true); /* static */
GEM_BUG_ON(!clone->has_read_only); GEM_BUG_ON(!clone->has_read_only);
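ctx->vm is now an __rcu pointer, so every access needs an explicit annotation. The hunk above can assert static safety because the kernel context's vm never changes once set. Across the patch, three access flavours appear (a sketch):

	/* pointer value only, never dereferenced (tracepoints, stats) */
	struct i915_address_space *vm = rcu_access_pointer(ctx->vm);

	/* full reference, for use after any lock/RCU section is left */
	vm = i915_gem_context_get_vm_rcu(ctx);
	i915_vm_put(vm);

	/* provably immutable pointer: document why no protection is needed */
	vm = rcu_dereference_protected(ctx->vm, true /* static */);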


@ -1853,8 +1853,8 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
}; };
#undef ctx_flexeuN #undef ctx_flexeuN
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct i915_gem_context *ctx; struct i915_gem_context *ctx, *cn;
int i; int i, err;
for (i = 2; i < ARRAY_SIZE(regs); i++) for (i = 2; i < ARRAY_SIZE(regs); i++)
regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
@ -1877,16 +1877,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
* context. Contexts idle at the time of reconfiguration are not * context. Contexts idle at the time of reconfiguration are not
* trapped behind the barrier. * trapped behind the barrier.
*/ */
list_for_each_entry(ctx, &i915->contexts.list, link) { spin_lock(&i915->gem.contexts.lock);
int err; list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
if (ctx == i915->kernel_context) if (ctx == i915->kernel_context)
continue; continue;
if (!kref_get_unless_zero(&ctx->ref))
continue;
spin_unlock(&i915->gem.contexts.lock);
err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs)); err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs));
if (err) if (err) {
i915_gem_context_put(ctx);
return err; return err;
}
spin_lock(&i915->gem.contexts.lock);
list_safe_reset_next(ctx, cn, link);
i915_gem_context_put(ctx);
} }
spin_unlock(&i915->gem.contexts.lock);
/* /*
* After updating all other contexts, we need to modify ourselves. * After updating all other contexts, we need to modify ourselves.
@ -1895,7 +1906,6 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
*/ */
for_each_uabi_engine(engine, i915) { for_each_uabi_engine(engine, i915) {
struct intel_context *ce = engine->kernel_context; struct intel_context *ce = engine->kernel_context;
int err;
if (engine->class != RENDER_CLASS) if (engine->class != RENDER_CLASS)
continue; continue;


@ -176,16 +176,12 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count); count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
memset(buf, 0, count); memset(buf, 0, count);
ret = i915_mutex_lock_interruptible(&i915->drm); spin_lock(&i915->gem.contexts.lock);
if (ret)
return ret;
if (i915->l3_parity.remap_info[slice]) if (i915->l3_parity.remap_info[slice])
memcpy(buf, memcpy(buf,
i915->l3_parity.remap_info[slice] + offset / sizeof(u32), i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
count); count);
spin_unlock(&i915->gem.contexts.lock);
mutex_unlock(&i915->drm.struct_mutex);
return count; return count;
} }
@ -198,8 +194,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj); struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
int slice = (int)(uintptr_t)attr->private; int slice = (int)(uintptr_t)attr->private;
u32 *remap_info, *freeme = NULL;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
u32 **remap_info;
int ret; int ret;
ret = l3_access_valid(i915, offset); ret = l3_access_valid(i915, offset);
@ -209,37 +205,36 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
if (count < sizeof(u32)) if (count < sizeof(u32))
return -EINVAL; return -EINVAL;
ret = i915_mutex_lock_interruptible(&i915->drm); remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (ret) if (!remap_info)
return ret; return -ENOMEM;
remap_info = &i915->l3_parity.remap_info[slice]; spin_lock(&i915->gem.contexts.lock);
if (!*remap_info) {
*remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); if (i915->l3_parity.remap_info[slice]) {
if (!*remap_info) { freeme = remap_info;
ret = -ENOMEM; remap_info = i915->l3_parity.remap_info[slice];
goto out; } else {
} i915->l3_parity.remap_info[slice] = remap_info;
} }
count = round_down(count, sizeof(u32)); count = round_down(count, sizeof(u32));
memcpy(*remap_info + offset / sizeof(u32), buf, count); memcpy(remap_info + offset / sizeof(u32), buf, count);
/* NB: We defer the remapping until we switch to the context */ /* NB: We defer the remapping until we switch to the context */
list_for_each_entry(ctx, &i915->contexts.list, link) list_for_each_entry(ctx, &i915->gem.contexts.list, link)
ctx->remap_slice |= BIT(slice); ctx->remap_slice |= BIT(slice);
spin_unlock(&i915->gem.contexts.lock);
kfree(freeme);
/* /*
* TODO: Ideally we really want a GPU reset here to make sure errors * TODO: Ideally we really want a GPU reset here to make sure errors
* aren't propagated. Since I cannot find a stable way to reset the GPU * aren't propagated. Since I cannot find a stable way to reset the GPU
* at this point it is left as a TODO. * at this point it is left as a TODO.
*/ */
ret = count; return count;
out:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
} }
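i915_l3_write() can no longer allocate where it used to: the contexts lock is a spinlock and kzalloc(GFP_KERNEL) may sleep, so the buffer is allocated optimistically up front and the install race is resolved under the lock, with the losing buffer freed after unlock. Condensed (a sketch of the idiom, not the full function):

	u32 *remap_info, *freeme = NULL;

	remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);	/* may sleep */
	if (!remap_info)
		return -ENOMEM;

	spin_lock(&i915->gem.contexts.lock);
	if (i915->l3_parity.remap_info[slice]) {
		freeme = remap_info;		/* raced: keep the existing buffer */
		remap_info = i915->l3_parity.remap_info[slice];
	} else {
		i915->l3_parity.remap_info[slice] = remap_info;
	}
	/* ... copy in the new values and flag every context ... */
	spin_unlock(&i915->gem.contexts.lock);

	kfree(freeme);	/* free the loser outside the critical section */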
static const struct bin_attribute dpf_attrs = { static const struct bin_attribute dpf_attrs = {


@ -952,7 +952,7 @@ DECLARE_EVENT_CLASS(i915_context,
TP_fast_assign( TP_fast_assign(
__entry->dev = ctx->i915->drm.primary->index; __entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx; __entry->ctx = ctx;
__entry->vm = ctx->vm; __entry->vm = rcu_access_pointer(ctx->vm);
), ),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p", TP_printk("dev=%u, ctx=%p, ctx_vm=%p",


@ -138,11 +138,9 @@ static int igt_gem_suspend(void *arg)
return PTR_ERR(file); return PTR_ERR(file);
err = -ENOMEM; err = -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file); ctx = live_context(i915, file);
if (!IS_ERR(ctx)) if (!IS_ERR(ctx))
err = switch_to_context(i915, ctx); err = switch_to_context(i915, ctx);
mutex_unlock(&i915->drm.struct_mutex);
if (err) if (err)
goto out; goto out;
@ -157,9 +155,7 @@ static int igt_gem_suspend(void *arg)
pm_resume(i915); pm_resume(i915);
mutex_lock(&i915->drm.struct_mutex);
err = switch_to_context(i915, ctx); err = switch_to_context(i915, ctx);
mutex_unlock(&i915->drm.struct_mutex);
out: out:
mock_file_free(i915, file); mock_file_free(i915, file);
return err; return err;
@ -177,11 +173,9 @@ static int igt_gem_hibernate(void *arg)
return PTR_ERR(file); return PTR_ERR(file);
err = -ENOMEM; err = -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file); ctx = live_context(i915, file);
if (!IS_ERR(ctx)) if (!IS_ERR(ctx))
err = switch_to_context(i915, ctx); err = switch_to_context(i915, ctx);
mutex_unlock(&i915->drm.struct_mutex);
if (err) if (err)
goto out; goto out;
@ -196,9 +190,7 @@ static int igt_gem_hibernate(void *arg)
pm_resume(i915); pm_resume(i915);
mutex_lock(&i915->drm.struct_mutex);
err = switch_to_context(i915, ctx); err = switch_to_context(i915, ctx);
mutex_unlock(&i915->drm.struct_mutex);
out: out:
mock_file_free(i915, file); mock_file_free(i915, file);
return err; return err;


@ -473,7 +473,6 @@ static int igt_evict_contexts(void *arg)
} }
count = 0; count = 0;
mutex_lock(&i915->drm.struct_mutex);
onstack_fence_init(&fence); onstack_fence_init(&fence);
do { do {
struct i915_request *rq; struct i915_request *rq;
@ -510,8 +509,6 @@ static int igt_evict_contexts(void *arg)
count++; count++;
err = 0; err = 0;
} while(1); } while(1);
mutex_unlock(&i915->drm.struct_mutex);
onstack_fence_fini(&fence); onstack_fence_fini(&fence);
pr_info("Submitted %lu contexts/requests on %s\n", pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name); count, engine->name);


@ -1246,6 +1246,7 @@ static int exercise_mock(struct drm_i915_private *i915,
unsigned long end_time)) unsigned long end_time))
{ {
const u64 limit = totalram_pages() << PAGE_SHIFT; const u64 limit = totalram_pages() << PAGE_SHIFT;
struct i915_address_space *vm;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
IGT_TIMEOUT(end_time); IGT_TIMEOUT(end_time);
int err; int err;
@ -1254,7 +1255,9 @@ static int exercise_mock(struct drm_i915_private *i915,
if (!ctx) if (!ctx)
return -ENOMEM; return -ENOMEM;
err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time); vm = i915_gem_context_get_vm_rcu(ctx);
err = func(i915, vm, 0, min(vm->total, limit), end_time);
i915_vm_put(vm);
mock_context_close(ctx); mock_context_close(ctx);
return err; return err;
@ -1801,15 +1804,15 @@ static int igt_cs_tlb(void *arg)
goto out_unlock; goto out_unlock;
} }
vm = ctx->vm; vm = i915_gem_context_get_vm_rcu(ctx);
if (!vm) if (i915_is_ggtt(vm))
goto out_unlock; goto out_vm;
/* Create two pages; dummy we prefill the TLB, and intended */ /* Create two pages; dummy we prefill the TLB, and intended */
bbe = i915_gem_object_create_internal(i915, PAGE_SIZE); bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(bbe)) { if (IS_ERR(bbe)) {
err = PTR_ERR(bbe); err = PTR_ERR(bbe);
goto out_unlock; goto out_vm;
} }
batch = i915_gem_object_pin_map(bbe, I915_MAP_WC); batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
@ -2014,6 +2017,8 @@ static int igt_cs_tlb(void *arg)
i915_gem_object_put(act); i915_gem_object_put(act);
out_put_bbe: out_put_bbe:
i915_gem_object_put(bbe); i915_gem_object_put(bbe);
out_vm:
i915_vm_put(vm);
out_unlock: out_unlock:
mutex_unlock(&i915->drm.struct_mutex); mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file); mock_file_free(i915, file);


@ -181,9 +181,7 @@ static int igt_request_rewind(void *arg)
struct intel_context *ce; struct intel_context *ce;
int err = -EINVAL; int err = -EINVAL;
mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A"); ctx[0] = mock_context(i915, "A");
mutex_unlock(&i915->drm.struct_mutex);
ce = i915_gem_context_get_engine(ctx[0], RCS0); ce = i915_gem_context_get_engine(ctx[0], RCS0);
GEM_BUG_ON(IS_ERR(ce)); GEM_BUG_ON(IS_ERR(ce));
@ -197,9 +195,7 @@ static int igt_request_rewind(void *arg)
i915_request_get(request); i915_request_get(request);
i915_request_add(request); i915_request_add(request);
mutex_lock(&i915->drm.struct_mutex);
ctx[1] = mock_context(i915, "B"); ctx[1] = mock_context(i915, "B");
mutex_unlock(&i915->drm.struct_mutex);
ce = i915_gem_context_get_engine(ctx[1], RCS0); ce = i915_gem_context_get_engine(ctx[1], RCS0);
GEM_BUG_ON(IS_ERR(ce)); GEM_BUG_ON(IS_ERR(ce));
@ -438,9 +434,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
} }
for (n = 0; n < t.ncontexts; n++) { for (n = 0; n < t.ncontexts; n++) {
mutex_lock(&t.engine->i915->drm.struct_mutex);
t.contexts[n] = mock_context(t.engine->i915, "mock"); t.contexts[n] = mock_context(t.engine->i915, "mock");
mutex_unlock(&t.engine->i915->drm.struct_mutex);
if (!t.contexts[n]) { if (!t.contexts[n]) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_contexts; goto out_contexts;
@ -734,9 +728,9 @@ static int live_empty_request(void *arg)
static struct i915_vma *recursive_batch(struct drm_i915_private *i915) static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{ {
struct i915_gem_context *ctx = i915->kernel_context; struct i915_gem_context *ctx = i915->kernel_context;
struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915); const int gen = INTEL_GEN(i915);
struct i915_address_space *vm;
struct i915_vma *vma; struct i915_vma *vma;
u32 *cmd; u32 *cmd;
int err; int err;
@ -745,7 +739,9 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (IS_ERR(obj)) if (IS_ERR(obj))
return ERR_CAST(obj); return ERR_CAST(obj);
vm = i915_gem_context_get_vm_rcu(ctx);
vma = i915_vma_instance(obj, vm, NULL); vma = i915_vma_instance(obj, vm, NULL);
i915_vm_put(vm);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto err; goto err;
@ -1220,9 +1216,7 @@ static int live_breadcrumbs_smoketest(void *arg)
} }
for (n = 0; n < t[0].ncontexts; n++) { for (n = 0; n < t[0].ncontexts; n++) {
mutex_lock(&i915->drm.struct_mutex);
t[0].contexts[n] = live_context(i915, file); t[0].contexts[n] = live_context(i915, file);
mutex_unlock(&i915->drm.struct_mutex);
if (!t[0].contexts[n]) { if (!t[0].contexts[n]) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_contexts; goto out_contexts;


@ -24,6 +24,7 @@
#include <linux/prime_numbers.h> #include <linux/prime_numbers.h>
#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h" #include "gem/selftests/mock_context.h"
#include "i915_scatterlist.h" #include "i915_scatterlist.h"
@ -38,7 +39,7 @@ static bool assert_vma(struct i915_vma *vma,
{ {
bool ok = true; bool ok = true;
if (vma->vm != ctx->vm) { if (vma->vm != rcu_access_pointer(ctx->vm)) {
pr_err("VMA created with wrong VM\n"); pr_err("VMA created with wrong VM\n");
ok = false; ok = false;
} }
@ -113,11 +114,13 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) { list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) { for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) { list_for_each_entry(ctx, contexts, link) {
struct i915_address_space *vm = ctx->vm; struct i915_address_space *vm;
struct i915_vma *vma; struct i915_vma *vma;
int err; int err;
vm = i915_gem_context_get_vm_rcu(ctx);
vma = checked_vma_instance(obj, vm, NULL); vma = checked_vma_instance(obj, vm, NULL);
i915_vm_put(vm);
if (IS_ERR(vma)) if (IS_ERR(vma))
return PTR_ERR(vma); return PTR_ERR(vma);


@ -59,11 +59,9 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_drain_workqueue(i915); i915_gem_drain_workqueue(i915);
mutex_lock(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id) for_each_engine(engine, i915, id)
mock_engine_free(engine); mock_engine_free(engine);
i915_gem_contexts_fini(i915); i915_gem_driver_release__contexts(i915);
mutex_unlock(&i915->drm.struct_mutex);
intel_timelines_fini(i915); intel_timelines_fini(i915);
@ -206,7 +204,7 @@ struct drm_i915_private *mock_gem_device(void)
return i915; return i915;
err_context: err_context:
i915_gem_contexts_fini(i915); i915_gem_driver_release__contexts(i915);
err_engine: err_engine:
mock_engine_free(i915->engine[RCS0]); mock_engine_free(i915->engine[RCS0]);
err_unlock: err_unlock: