From 2c3a3f44dc13a7c964e93385e1c1ca848656bed0 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Fri, 4 Nov 2016 10:30:01 +0000
Subject: [PATCH] drm/i915: Fix pages pin counting around swizzle quirk

commit bc0629a76726 ("drm/i915: Track pages pinned due to swizzling quirk")
fixed one problem, but revealed a whole lot more. The root cause of the
pin count mismatch for the swizzle quirk (for L-shaped memory on gen3/4)
was that we were incrementing pages_pin_count upon getting the backing
pages but then overwriting pages_pin_count to set it to 1 afterwards.
With a little bit of adjustment to satisfy the GEM_BUG_ON sanity checks,
the fix is to replace the explicit atomic_set with an atomic_inc.

v2: Consistently use atomics (rather than mixing atomics and helpers)
within the low-level get_pages routines. This makes the atomic
operations much clearer.

Fixes: 1233e2db199d ("drm/i915: Move object backing storage manipulation")
Signed-off-by: Chris Wilson
Cc: Joonas Lahtinen
Cc: Tvrtko Ursulin
Link: http://patchwork.freedesktop.org/patch/msgid/20161104103001.27643-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen
---
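For context (below the cut, not part of the commit): the clobbering
described above reduces to a minimal standalone sketch. It uses C11
atomics in userspace rather than the kernel's atomic_t, and the names
(fake_obj, pin_pages, get_pages_with_quirk) are illustrative stand-ins,
not the i915 API:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_obj {
	atomic_int pages_pin_count;
	bool quirked;
	bool has_pages;
};

/* Stand-in for a pin-pages helper: just bump the pin count. */
static void pin_pages(struct fake_obj *obj)
{
	atomic_fetch_add(&obj->pages_pin_count, 1);
}

/*
 * Stand-in for the get_pages path: acquiring the backing store may
 * itself take a pin (the swizzle quirk), so the caller must not assume
 * the count is still zero afterwards.
 */
static void get_pages_with_quirk(struct fake_obj *obj)
{
	obj->has_pages = true;
	pin_pages(obj);		/* quirk keeps the pages pinned */
	obj->quirked = true;
}

int main(void)
{
	struct fake_obj obj = { .pages_pin_count = 0 };

	get_pages_with_quirk(&obj);

	/* Buggy pattern: a plain store discards the quirk's pin ... */
	/*	atomic_store(&obj.pages_pin_count, 1); */

	/* ... fixed pattern: add the caller's pin on top of existing ones. */
	atomic_fetch_add(&obj.pages_pin_count, 1);

	assert(atomic_load(&obj.pages_pin_count) == 2);	/* quirk + caller */
	printf("pages_pin_count = %d\n", atomic_load(&obj.pages_pin_count));
	return 0;
}

With the store (the userspace analogue of atomic_set_release), the pin
taken by the quirk is silently lost and a later unpin would underflow
the count; atomic_fetch_add (the analogue of atomic_inc) stacks the
caller's pin on top of whatever the get_pages path already took, which
matches the changes to __i915_gem_object_get_pages and
i915_gem_object_pin_map in the diff below.
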
 drivers/gpu/drm/i915/i915_gem.c        | 47 ++++++++++++++------------
 drivers/gpu/drm/i915/i915_gem_gtt.c    |  7 ++++
 drivers/gpu/drm/i915/i915_gem_tiling.c |  1 +
 3 files changed, 34 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1f995ced524e..0dbf38c51d14 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2324,12 +2324,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj, st);
 
-	if (i915_gem_object_is_tiled(obj) &&
-	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-		__i915_gem_object_pin_pages(obj);
-		obj->mm.quirked = true;
-	}
-
 	return st;
 
 err_pages:
@@ -2362,12 +2356,21 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	obj->mm.get_page.sg_idx = 0;
 
 	obj->mm.pages = pages;
+
+	if (i915_gem_object_is_tiled(obj) &&
+	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+		GEM_BUG_ON(obj->mm.quirked);
+		__i915_gem_object_pin_pages(obj);
+		obj->mm.quirked = true;
+	}
 }
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
 
+	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
 	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
 		DRM_DEBUG("Attempting to obtain a purgeable object\n");
 		return -EFAULT;
@@ -2396,16 +2399,14 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (likely(obj->mm.pages)) {
-		__i915_gem_object_pin_pages(obj);
-		goto unlock;
+	if (unlikely(!obj->mm.pages)) {
+		err = ____i915_gem_object_get_pages(obj);
+		if (err)
+			goto unlock;
+
+		smp_mb__before_atomic();
 	}
-
-	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
-
-	err = ____i915_gem_object_get_pages(obj);
-	if (!err)
-		atomic_set_release(&obj->mm.pages_pin_count, 1);
+	atomic_inc(&obj->mm.pages_pin_count);
 
 unlock:
 	mutex_unlock(&obj->mm.lock);
@@ -2476,12 +2477,14 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 	pinned = true;
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		ret = ____i915_gem_object_get_pages(obj);
-		if (ret)
-			goto err_unlock;
+		if (unlikely(!obj->mm.pages)) {
+			ret = ____i915_gem_object_get_pages(obj);
+			if (ret)
+				goto err_unlock;
 
-		GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count));
-		atomic_set_release(&obj->mm.pages_pin_count, 1);
+			smp_mb__before_atomic();
+		}
+		atomic_inc(&obj->mm.pages_pin_count);
 		pinned = false;
 	}
 	GEM_BUG_ON(!obj->mm.pages);
@@ -2933,7 +2936,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		goto destroy;
 
 	GEM_BUG_ON(obj->bind_count == 0);
-	GEM_BUG_ON(!obj->mm.pages);
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/* release the fence reg _after_ flushing */
@@ -3167,6 +3170,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	obj->bind_count++;
+	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
 
 	return 0;
 
@@ -4100,6 +4104,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 			obj->mm.quirked = false;
 		}
 		if (args->madv == I915_MADV_WILLNEED) {
+			GEM_BUG_ON(obj->mm.quirked);
 			__i915_gem_object_pin_pages(obj);
 			obj->mm.quirked = true;
 		}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index cad6de65947d..7531bca29f7a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3711,6 +3711,13 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 {
 	int ret = 0;
 
+	/* The vma->pages are only valid within the lifespan of the borrowed
+	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
+	 * must be the vma->pages. A simple rule is that vma->pages must only
+	 * be accessed when the obj->mm.pages are pinned.
+	 */
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
+
 	if (vma->pages)
 		return 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1577e7810cd6..251d51b01174 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -269,6 +269,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			obj->mm.quirked = false;
 		}
 		if (!i915_gem_object_is_tiled(obj)) {
+			GEM_BUG_ON(!obj->mm.quirked);
 			__i915_gem_object_pin_pages(obj);
 			obj->mm.quirked = true;
 		}