2018-05-02 23:38:39 +07:00
|
|
|
/*
|
|
|
|
* SPDX-License-Identifier: MIT
|
|
|
|
*
|
|
|
|
* Copyright © 2016-2018 Intel Corporation
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "i915_drv.h"
|
|
|
|
|
2019-03-02 00:08:59 +07:00
|
|
|
#include "i915_active.h"
|
2018-05-02 23:38:39 +07:00
|
|
|
#include "i915_syncmap.h"
|
2019-10-24 17:03:44 +07:00
|
|
|
#include "intel_gt.h"
|
|
|
|
#include "intel_ring.h"
|
|
|
|
#include "intel_timeline.h"
|
2019-03-02 00:08:59 +07:00
|
|
|
|
|
|
|
/* Stash a flag bit in the low (alignment) bits of a pointer. */
#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))

/* Each HWSP cacheline is 2^CACHELINE_BITS (64) bytes. */
#define CACHELINE_BITS 6
/* Bit packed into cl->vaddr marking the cacheline as pending-free. */
#define CACHELINE_FREE CACHELINE_BITS
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
/*
 * A page of HW status words, carved into 64-byte cachelines that are
 * handed out to individual timelines. Pages with spare cachelines sit
 * on gt_timelines->hwsp_free_list, guarded by hwsp_lock.
 */
struct intel_timeline_hwsp {
	struct intel_gt *gt;			/* GT owning the backing vma */
	struct intel_gt_timelines *gt_timelines; /* holds hwsp_lock + free list */
	struct list_head free_link;		/* entry in hwsp_free_list */
	struct i915_vma *vma;			/* one page of HWSP storage */
	u64 free_bitmap;			/* one bit per free cacheline */
};
|
|
|
|
|
2019-06-21 14:08:09 +07:00
|
|
|
static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
|
2019-01-29 01:18:09 +07:00
|
|
|
{
|
2019-06-21 14:08:09 +07:00
|
|
|
struct drm_i915_private *i915 = gt->i915;
|
2019-01-29 01:18:09 +07:00
|
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
struct i915_vma *vma;
|
|
|
|
|
|
|
|
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
|
|
|
|
if (IS_ERR(obj))
|
|
|
|
return ERR_CAST(obj);
|
|
|
|
|
|
|
|
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
|
|
|
|
|
2019-06-21 14:08:09 +07:00
|
|
|
vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
|
2019-01-29 01:18:09 +07:00
|
|
|
if (IS_ERR(vma))
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
|
|
|
return vma;
|
|
|
|
}
|
|
|
|
|
2019-01-29 01:18:10 +07:00
|
|
|
/*
 * Hand out one free HWSP cacheline, allocating a fresh page of them if
 * the pool is empty. On success, returns the backing vma and writes the
 * cacheline index within that page to *cacheline.
 */
static struct i915_vma *
hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
{
	struct intel_gt_timelines *gt = &timeline->gt->timelines;
	struct intel_timeline_hwsp *hwsp;

	/* All 64 bits of free_bitmap must map to cachelines within a page. */
	BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);

	spin_lock_irq(&gt->hwsp_lock);

	/* hwsp_free_list only contains HWSP that have available cachelines */
	hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
					typeof(*hwsp), free_link);
	if (!hwsp) {
		struct i915_vma *vma;

		/* Drop the lock for the (sleeping) allocations below. */
		spin_unlock_irq(&gt->hwsp_lock);

		hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
		if (!hwsp)
			return ERR_PTR(-ENOMEM);

		vma = __hwsp_alloc(timeline->gt);
		if (IS_ERR(vma)) {
			kfree(hwsp);
			return vma;
		}

		vma->private = hwsp;
		hwsp->gt = timeline->gt;
		hwsp->vma = vma;
		hwsp->free_bitmap = ~0ull;
		hwsp->gt_timelines = gt;

		/*
		 * Retake the lock and publish the new page; we still hold a
		 * private reference via the untouched free_bitmap bits, so
		 * the page cannot vanish before we claim our cacheline.
		 */
		spin_lock_irq(&gt->hwsp_lock);
		list_add(&hwsp->free_link, &gt->hwsp_free_list);
	}

	GEM_BUG_ON(!hwsp->free_bitmap);
	*cacheline = __ffs64(hwsp->free_bitmap);
	hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
	if (!hwsp->free_bitmap)
		/* Fully used: hide the page until a cacheline is returned. */
		list_del(&hwsp->free_link);

	spin_unlock_irq(&gt->hwsp_lock);

	GEM_BUG_ON(hwsp->vma->private != hwsp);
	return hwsp->vma;
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
/*
 * Return one cacheline to its HWSP page. Republishes the page on the
 * freelist when it gains its first free cacheline, and frees the page
 * entirely once every cacheline has been returned.
 */
static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
{
	struct intel_gt_timelines *gt = hwsp->gt_timelines;
	unsigned long flags;

	/* May be called from hardirq context (retirement), hence irqsave. */
	spin_lock_irqsave(&gt->hwsp_lock, flags);

	/* As a cacheline becomes available, publish the HWSP on the freelist */
	if (!hwsp->free_bitmap)
		list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);

	GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
	hwsp->free_bitmap |= BIT_ULL(cacheline);

	/* And if no one is left using it, give the page back to the system */
	if (hwsp->free_bitmap == ~0ull) {
		i915_vma_put(hwsp->vma);
		list_del(&hwsp->free_link);
		kfree(hwsp);
	}

	spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
|
|
|
|
|
2020-03-23 16:28:34 +07:00
|
|
|
static void __rcu_cacheline_free(struct rcu_head *rcu)
|
|
|
|
{
|
|
|
|
struct intel_timeline_cacheline *cl =
|
|
|
|
container_of(rcu, typeof(*cl), rcu);
|
|
|
|
|
|
|
|
i915_active_fini(&cl->active);
|
|
|
|
kfree(cl);
|
|
|
|
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
/*
 * Release an idle cacheline: drop the kmap and vma reference taken at
 * cacheline_alloc(), return the cacheline to its HWSP page, then defer
 * the final kfree via RCU so concurrent RCU readers (see
 * intel_timeline_read_hwsp) are not left with a dangling pointer.
 */
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
	GEM_BUG_ON(!i915_active_is_idle(&cl->active));

	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
	i915_vma_put(cl->hwsp->vma);
	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));

	call_rcu(&cl->rcu, __rcu_cacheline_free);
}
|
|
|
|
|
2019-10-04 20:39:59 +07:00
|
|
|
__i915_active_call
|
2019-03-02 00:08:59 +07:00
|
|
|
static void __cacheline_retire(struct i915_active *active)
|
|
|
|
{
|
2019-06-21 14:08:10 +07:00
|
|
|
struct intel_timeline_cacheline *cl =
|
2019-03-02 00:08:59 +07:00
|
|
|
container_of(active, typeof(*cl), active);
|
|
|
|
|
|
|
|
i915_vma_unpin(cl->hwsp->vma);
|
|
|
|
if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
|
|
|
|
__idle_cacheline_free(cl);
|
|
|
|
}
|
|
|
|
|
2019-06-22 01:38:00 +07:00
|
|
|
static int __cacheline_active(struct i915_active *active)
|
|
|
|
{
|
|
|
|
struct intel_timeline_cacheline *cl =
|
|
|
|
container_of(active, typeof(*cl), active);
|
|
|
|
|
|
|
|
__i915_vma_pin(cl->hwsp->vma);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
static struct intel_timeline_cacheline *
|
|
|
|
cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
|
2019-03-02 00:08:59 +07:00
|
|
|
{
|
2019-06-21 14:08:10 +07:00
|
|
|
struct intel_timeline_cacheline *cl;
|
2019-03-02 00:08:59 +07:00
|
|
|
void *vaddr;
|
|
|
|
|
|
|
|
GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
|
|
|
|
|
|
|
|
cl = kmalloc(sizeof(*cl), GFP_KERNEL);
|
|
|
|
if (!cl)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
|
|
|
|
if (IS_ERR(vaddr)) {
|
|
|
|
kfree(cl);
|
|
|
|
return ERR_CAST(vaddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
i915_vma_get(hwsp->vma);
|
|
|
|
cl->hwsp = hwsp;
|
|
|
|
cl->vaddr = page_pack_bits(vaddr, cacheline);
|
|
|
|
|
2019-10-04 20:40:00 +07:00
|
|
|
i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
|
2019-03-02 00:08:59 +07:00
|
|
|
|
|
|
|
return cl;
|
|
|
|
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
static void cacheline_acquire(struct intel_timeline_cacheline *cl)
|
2019-03-02 00:08:59 +07:00
|
|
|
{
|
2019-06-22 01:38:00 +07:00
|
|
|
if (cl)
|
|
|
|
i915_active_acquire(&cl->active);
|
2019-03-02 00:08:59 +07:00
|
|
|
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
static void cacheline_release(struct intel_timeline_cacheline *cl)
|
2019-03-02 00:08:59 +07:00
|
|
|
{
|
|
|
|
if (cl)
|
|
|
|
i915_active_release(&cl->active);
|
|
|
|
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
/*
 * Drop our logical ownership of the cacheline. If no requests are still
 * tracking it, free it immediately; otherwise flag it CACHELINE_FREE so
 * that __cacheline_retire() frees it once the last request retires.
 */
static void cacheline_free(struct intel_timeline_cacheline *cl)
{
	if (!i915_active_acquire_if_busy(&cl->active)) {
		/* Idle: nothing can race us, free right away. */
		__idle_cacheline_free(cl);
		return;
	}

	/* Busy: defer the free to retirement via the FREE marker bit. */
	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);

	i915_active_release(&cl->active);
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
/*
 * Initialise a timeline's seqno tracking. If @hwsp is NULL, a private
 * cacheline is carved out of the shared per-GT HWSP pool (and the
 * timeline emits an initial breadcrumb); otherwise the caller-supplied
 * global HWSP page is used at the fixed per-engine seqno offset.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_timeline_init(struct intel_timeline *timeline,
			struct intel_gt *gt,
			struct i915_vma *hwsp)
{
	void *vaddr;

	kref_init(&timeline->kref);
	atomic_set(&timeline->pin_count, 0);

	timeline->gt = gt;

	timeline->has_initial_breadcrumb = !hwsp;
	timeline->hwsp_cacheline = NULL;

	if (!hwsp) {
		struct intel_timeline_cacheline *cl;
		unsigned int cacheline;

		hwsp = hwsp_alloc(timeline, &cacheline);
		if (IS_ERR(hwsp))
			return PTR_ERR(hwsp);

		/* vma->private is the owning intel_timeline_hwsp pool page. */
		cl = cacheline_alloc(hwsp->private, cacheline);
		if (IS_ERR(cl)) {
			__idle_hwsp_free(hwsp->private, cacheline);
			return PTR_ERR(cl);
		}

		timeline->hwsp_cacheline = cl;
		timeline->hwsp_offset = cacheline * CACHELINE_BYTES;

		/* cl->vaddr already holds the kmap; strip the packed bits. */
		vaddr = page_mask_bits(cl->vaddr);
	} else {
		timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;

		vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);
	}

	/* Start with a cleared seqno slot; hwsp_seqno points into the map. */
	timeline->hwsp_seqno =
		memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);

	timeline->hwsp_ggtt = i915_vma_get(hwsp);
	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

	timeline->fence_context = dma_fence_context_alloc(1);

	mutex_init(&timeline->mutex);

	INIT_ACTIVE_FENCE(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

	i915_syncmap_init(&timeline->sync);

	return 0;
}
|
|
|
|
|
2019-11-01 20:04:06 +07:00
|
|
|
void intel_gt_init_timelines(struct intel_gt *gt)
|
2019-01-28 17:23:56 +07:00
|
|
|
{
|
2019-06-21 20:16:39 +07:00
|
|
|
struct intel_gt_timelines *timelines = >->timelines;
|
2019-01-28 17:23:56 +07:00
|
|
|
|
2019-08-16 03:57:07 +07:00
|
|
|
spin_lock_init(&timelines->lock);
|
2019-06-21 14:08:03 +07:00
|
|
|
INIT_LIST_HEAD(&timelines->active_list);
|
2019-01-28 17:23:56 +07:00
|
|
|
|
2019-06-21 14:08:03 +07:00
|
|
|
spin_lock_init(&timelines->hwsp_lock);
|
|
|
|
INIT_LIST_HEAD(&timelines->hwsp_free_list);
|
|
|
|
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
void intel_timeline_fini(struct intel_timeline *timeline)
|
2018-05-02 23:38:39 +07:00
|
|
|
{
|
2019-08-16 03:57:08 +07:00
|
|
|
GEM_BUG_ON(atomic_read(&timeline->pin_count));
|
2018-05-02 23:38:39 +07:00
|
|
|
GEM_BUG_ON(!list_empty(&timeline->requests));
|
2019-11-25 17:58:58 +07:00
|
|
|
GEM_BUG_ON(timeline->retire);
|
2018-05-02 23:38:39 +07:00
|
|
|
|
2019-03-02 00:08:59 +07:00
|
|
|
if (timeline->hwsp_cacheline)
|
|
|
|
cacheline_free(timeline->hwsp_cacheline);
|
|
|
|
else
|
|
|
|
i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
|
|
|
|
|
2019-01-29 01:18:09 +07:00
|
|
|
i915_vma_put(timeline->hwsp_ggtt);
|
2018-05-02 23:38:39 +07:00
|
|
|
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
struct intel_timeline *
|
|
|
|
intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
|
2018-05-02 23:38:39 +07:00
|
|
|
{
|
2019-06-21 14:08:10 +07:00
|
|
|
struct intel_timeline *timeline;
|
2019-01-29 01:18:09 +07:00
|
|
|
int err;
|
2018-05-02 23:38:39 +07:00
|
|
|
|
|
|
|
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
|
|
|
|
if (!timeline)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
err = intel_timeline_init(timeline, gt, global_hwsp);
|
2019-01-29 01:18:09 +07:00
|
|
|
if (err) {
|
|
|
|
kfree(timeline);
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
2018-05-02 23:38:39 +07:00
|
|
|
return timeline;
|
|
|
|
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
/*
 * Pin the timeline's HWSP into the GGTT and record the resulting GGTT
 * address in hwsp_offset. Pinning is refcounted; only the first pin
 * does real work. Returns 0 or a negative error code.
 */
int intel_timeline_pin(struct intel_timeline *tl)
{
	int err;

	/* Fast path: already pinned, just bump the count. */
	if (atomic_add_unless(&tl->pin_count, 1, 0))
		return 0;

	err = i915_ggtt_pin(tl->hwsp_ggtt, 0, PIN_HIGH);
	if (err)
		return err;

	/* Convert the page-relative offset into an absolute GGTT offset. */
	tl->hwsp_offset =
		i915_ggtt_offset(tl->hwsp_ggtt) +
		offset_in_page(tl->hwsp_offset);

	cacheline_acquire(tl->hwsp_cacheline);
	if (atomic_fetch_inc(&tl->pin_count)) {
		/*
		 * Lost the race to 0 -> 1: another thread performed the
		 * first pin concurrently, so drop our duplicate pin and
		 * cacheline acquisition while keeping our count.
		 */
		cacheline_release(tl->hwsp_cacheline);
		__i915_vma_unpin(tl->hwsp_ggtt);
	}

	return 0;
}
|
|
|
|
|
2019-08-16 03:57:06 +07:00
|
|
|
/*
 * Mark the timeline as having in-flight requests, adding it to the
 * per-GT active_list on the 0 -> 1 transition of active_count.
 */
void intel_timeline_enter(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/*
	 * Pretend we are serialised by the timeline->mutex.
	 *
	 * While generally true, there are a few exceptions to the rule
	 * for the engine->kernel_context being used to manage power
	 * transitions. As the engine_park may be called from under any
	 * timeline, it uses the power mutex as a global serialisation
	 * lock to prevent any other request entering its timeline.
	 *
	 * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
	 *
	 * However, intel_gt_retire_request() does not know which engine
	 * it is retiring along and so cannot partake in the engine-pm
	 * barrier, and there we use the tl->active_count as a means to
	 * pin the timeline in the active_list while the locks are dropped.
	 * Ergo, as that is outside of the engine-pm barrier, we need to
	 * use atomic to manipulate tl->active_count.
	 */
	lockdep_assert_held(&tl->mutex);

	/* Fast path: already on the active_list, just bump the count. */
	if (atomic_add_unless(&tl->active_count, 1, 0))
		return;

	spin_lock(&timelines->lock);
	/* Recheck under the lock; only the 0 -> 1 transition adds the link. */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);
	spin_unlock(&timelines->lock);
}
|
|
|
|
|
|
|
|
/*
 * Mark one batch of requests as complete, removing the timeline from
 * the per-GT active_list on the final 1 -> 0 transition of active_count.
 */
void intel_timeline_exit(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/* See intel_timeline_enter() */
	lockdep_assert_held(&tl->mutex);

	GEM_BUG_ON(!atomic_read(&tl->active_count));
	/* Fast path: not the last exit, just decrement the count. */
	if (atomic_add_unless(&tl->active_count, -1, 1))
		return;

	spin_lock(&timelines->lock);
	if (atomic_dec_and_test(&tl->active_count))
		list_del(&tl->link);
	spin_unlock(&timelines->lock);

	/*
	 * Since this timeline is idle, all barriers upon which we were waiting
	 * must also be complete and so we can discard the last used barriers
	 * without loss of information.
	 */
	i915_syncmap_free(&tl->sync);
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
static u32 timeline_advance(struct intel_timeline *tl)
|
2019-03-02 00:08:59 +07:00
|
|
|
{
|
2019-08-16 03:57:08 +07:00
|
|
|
GEM_BUG_ON(!atomic_read(&tl->pin_count));
|
2019-03-02 00:08:59 +07:00
|
|
|
GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
|
|
|
|
|
|
|
|
return tl->seqno += 1 + tl->has_initial_breadcrumb;
|
|
|
|
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
static void timeline_rollback(struct intel_timeline *tl)
|
2019-03-02 00:08:59 +07:00
|
|
|
{
|
|
|
|
tl->seqno -= 1 + tl->has_initial_breadcrumb;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Slow path of intel_timeline_get_seqno(): the seqno wrapped, so swap
 * the timeline onto a brand-new HWSP cacheline, leaving the old one
 * alive until @rq retires (so HW semaphores sampling it stay valid).
 */
static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
			   struct i915_request *rq,
			   u32 *seqno)
{
	struct intel_timeline_cacheline *cl;
	unsigned int cacheline;
	struct i915_vma *vma;
	void *vaddr;
	int err;

	might_lock(&tl->gt->ggtt->vm.mutex);

	/*
	 * If there is an outstanding GPU reference to this cacheline,
	 * such as it being sampled by a HW semaphore on another timeline,
	 * we cannot wraparound our seqno value (the HW semaphore does
	 * a strict greater-than-or-equals compare, not i915_seqno_passed).
	 * So if the cacheline is still busy, we must detach ourselves
	 * from it and leave it inflight alongside its users.
	 *
	 * However, if nobody is watching and we can guarantee that nobody
	 * will, we could simply reuse the same cacheline.
	 *
	 * if (i915_active_request_is_signaled(&tl->last_request) &&
	 *     i915_active_is_signaled(&tl->hwsp_cacheline->active))
	 *	return 0;
	 *
	 * That seems unlikely for a busy timeline that needed to wrap in
	 * the first place, so just replace the cacheline.
	 */

	vma = hwsp_alloc(tl, &cacheline);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_rollback;
	}

	err = i915_ggtt_pin(vma, 0, PIN_HIGH);
	if (err) {
		__idle_hwsp_free(vma->private, cacheline);
		goto err_rollback;
	}

	cl = cacheline_alloc(vma->private, cacheline);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		__idle_hwsp_free(vma->private, cacheline);
		goto err_unpin;
	}
	GEM_BUG_ON(cl->hwsp->vma != vma);

	/*
	 * Attach the old cacheline to the current request, so that we only
	 * free it after the current request is retired, which ensures that
	 * all writes into the cacheline from previous requests are complete.
	 */
	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
	if (err)
		goto err_cacheline;

	cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
	cacheline_free(tl->hwsp_cacheline);

	i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
	i915_vma_put(tl->hwsp_ggtt);

	tl->hwsp_ggtt = i915_vma_get(vma);

	/* Point the timeline at the fresh, zeroed cacheline. */
	vaddr = page_mask_bits(cl->vaddr);
	tl->hwsp_offset = cacheline * CACHELINE_BYTES;
	tl->hwsp_seqno =
		memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);

	tl->hwsp_offset += i915_ggtt_offset(vma);

	cacheline_acquire(cl);
	tl->hwsp_cacheline = cl;

	*seqno = timeline_advance(tl);
	GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
	return 0;

err_cacheline:
	cacheline_free(cl);
err_unpin:
	i915_vma_unpin(vma);
err_rollback:
	timeline_rollback(tl);
	return err;
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
int intel_timeline_get_seqno(struct intel_timeline *tl,
|
|
|
|
struct i915_request *rq,
|
|
|
|
u32 *seqno)
|
2019-03-02 00:08:59 +07:00
|
|
|
{
|
|
|
|
*seqno = timeline_advance(tl);
|
|
|
|
|
|
|
|
/* Replace the HWSP on wraparound for HW semaphores */
|
|
|
|
if (unlikely(!*seqno && tl->hwsp_cacheline))
|
2019-06-21 14:08:10 +07:00
|
|
|
return __intel_timeline_get_seqno(tl, rq, seqno);
|
2019-03-02 00:08:59 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
/* Keep @cl alive (and its HWSP address stable) until @rq is retired. */
static int cacheline_ref(struct intel_timeline_cacheline *cl,
			 struct i915_request *rq)
{
	return i915_active_add_request(&cl->active, rq);
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
/*
 * Look up the GGTT address of @from's breadcrumb so that @to can sample
 * it (e.g. with a HW semaphore), pinning the cacheline until @to retires.
 *
 * Returns 0 on success (writing the address to *hwsp), a negative error
 * code on failure, or 1 if @from has already completed / its seqno
 * wrapped, in which case no address is needed.
 */
int intel_timeline_read_hwsp(struct i915_request *from,
			     struct i915_request *to,
			     u32 *hwsp)
{
	struct intel_timeline_cacheline *cl;
	int err;

	GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline));

	/*
	 * from's timeline (and cacheline) may be freed concurrently;
	 * rcu protects the pointer, and acquiring the active reference
	 * pins the cacheline for use outside the rcu section.
	 */
	rcu_read_lock();
	cl = rcu_dereference(from->hwsp_cacheline);
	if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
		goto unlock; /* seqno wrapped and completed! */
	if (unlikely(i915_request_completed(from)))
		goto release;
	rcu_read_unlock();

	err = cacheline_ref(cl, to);
	if (err)
		goto out;

	*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
		ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;

out:
	i915_active_release(&cl->active);
	return err;

release:
	i915_active_release(&cl->active);
unlock:
	rcu_read_unlock();
	return 1;
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
/*
 * Drop one pin on the timeline; on the final unpin, release the
 * cacheline activity tracking and the GGTT binding taken in
 * intel_timeline_pin().
 */
void intel_timeline_unpin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	if (!atomic_dec_and_test(&tl->pin_count))
		return;

	cacheline_release(tl->hwsp_cacheline);

	__i915_vma_unpin(tl->hwsp_ggtt);
}
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
void __intel_timeline_free(struct kref *kref)
|
2018-05-02 23:38:39 +07:00
|
|
|
{
|
2019-06-21 14:08:10 +07:00
|
|
|
struct intel_timeline *timeline =
|
2018-05-02 23:38:39 +07:00
|
|
|
container_of(kref, typeof(*timeline), kref);
|
|
|
|
|
2019-06-21 14:08:10 +07:00
|
|
|
intel_timeline_fini(timeline);
|
drm/i915: Mark i915_request.timeline as a volatile, rcu pointer
The request->timeline is only valid until the request is retired (i.e.
before it is completed). Upon retiring the request, the context may be
unpinned and freed, and along with it the timeline may be freed. We
therefore need to be very careful when chasing rq->timeline that the
pointer does not disappear beneath us. The vast majority of users are in
a protected context, either during request construction or retirement,
where the timeline->mutex is held and the timeline cannot disappear. It
is those few off the beaten path (where we access a second timeline) that
need extra scrutiny -- to be added in the next patch after first adding
the warnings about dangerous access.
One complication, where we cannot use the timeline->mutex itself, is
during request submission onto hardware (under spinlocks). Here, we want
to check on the timeline to finalize the breadcrumb, and so we need to
impose a second rule to ensure that the request->timeline is indeed
valid. As we are submitting the request, it's context and timeline must
be pinned, as it will be used by the hardware. Since it is pinned, we
know the request->timeline must still be valid, and we cannot submit the
idle barrier until after we release the engine->active.lock, ergo while
submitting and holding that spinlock, a second thread cannot release the
timeline.
v2: Don't be lazy inside selftests; hold the timeline->mutex for as long
as we need it, and tidy up acquiring the timeline with a bit of
refactoring (i915_active_add_request)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190919111912.21631-1-chris@chris-wilson.co.uk
2019-09-19 18:19:10 +07:00
|
|
|
kfree_rcu(timeline, rcu);
|
2018-05-02 23:38:39 +07:00
|
|
|
}
|
|
|
|
|
2019-11-01 20:04:06 +07:00
|
|
|
void intel_gt_fini_timelines(struct intel_gt *gt)
|
2019-01-28 17:23:56 +07:00
|
|
|
{
|
2019-06-21 20:16:39 +07:00
|
|
|
struct intel_gt_timelines *timelines = >->timelines;
|
2019-01-28 17:23:56 +07:00
|
|
|
|
2019-06-21 14:08:03 +07:00
|
|
|
GEM_BUG_ON(!list_empty(&timelines->active_list));
|
|
|
|
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
|
|
|
|
}
|
|
|
|
|
2018-05-02 23:38:39 +07:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
2019-06-21 14:08:10 +07:00
|
|
|
#include "gt/selftests/mock_timeline.c"
|
|
|
|
#include "gt/selftest_timeline.c"
|
2018-05-02 23:38:39 +07:00
|
|
|
#endif
|