Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)
drm/i915/gvt: Drop redundant prepare_write/pin_pages
Since gvt calls pin_map for the shadow batch buffer, the prepare_write [+pin_pages] step is redundant. We can write into obj->mm.mapping directly, and the flush_map routine knows when it has to flush the CPU cache afterwards.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200619234543.17499-1-chris@chris-wilson.co.uk
parent 4fb3395343
commit 033ef711bb
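To illustrate the access pattern this change relies on, here is a minimal sketch, not the driver's actual code: the helper name shadow_bb_copy_sketch and its parameters are hypothetical, and the required headers are only noted in comments. Once the shadow batch buffer is pinned with i915_gem_object_pin_map(), the caller can write through the returned CPU pointer and let i915_gem_object_flush_map() decide whether a CPU-cache flush is needed, which is what makes the prepare_write()/clflush bookkeeping removed below redundant.

	/*
	 * Illustrative sketch only; assumes <linux/err.h>, <linux/string.h>
	 * and the i915 GEM object API ("gem/i915_gem_object.h").
	 */
	static int shadow_bb_copy_sketch(struct drm_i915_gem_object *obj,
					 const void *src, size_t len)
	{
		void *va;

		/* CPU write-back mapping of the shadow batch buffer. */
		va = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(va))
			return PTR_ERR(va);

		/* Write into obj->mm.mapping directly ... */
		memcpy(va, src, len);

		/* ... and let flush_map() flush the CPU cache only if needed. */
		i915_gem_object_flush_map(obj);

		i915_gem_object_unpin_map(obj);
		return 0;
	}

In the real gvt code the mapping stays pinned until release_shadow_batch_buffer(); the unpin above is only to keep the sketch self-contained.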
drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1904,19 +1904,10 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 		goto err_free_bb;
 	}
 
-	ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
-	if (ret)
-		goto err_free_obj;
-
 	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
 	if (IS_ERR(bb->va)) {
 		ret = PTR_ERR(bb->va);
-		goto err_finish_shmem_access;
-	}
-
-	if (bb->clflush & CLFLUSH_BEFORE) {
-		drm_clflush_virt_range(bb->va, bb->obj->base.size);
-		bb->clflush &= ~CLFLUSH_BEFORE;
+		goto err_free_obj;
 	}
 
 	ret = copy_gma_to_hva(s->vgpu, mm,
@@ -1935,7 +1926,6 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	INIT_LIST_HEAD(&bb->list);
 	list_add(&bb->list, &s->workload->shadow_bb);
 
-	bb->accessing = true;
 	bb->bb_start_cmd_va = s->ip_va;
 
 	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
@@ -1956,8 +1946,6 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	return 0;
 err_unmap:
 	i915_gem_object_unpin_map(bb->obj);
-err_finish_shmem_access:
-	i915_gem_object_finish_access(bb->obj);
 err_free_obj:
 	i915_gem_object_put(bb->obj);
 err_free_bb:
drivers/gpu/drm/i915/gvt/scheduler.c
@@ -505,26 +505,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 		bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
 			+ bb->bb_offset;
 
-		if (bb->ppgtt) {
-			/* for non-priv bb, scan&shadow is only for
-			 * debugging purpose, so the content of shadow bb
-			 * is the same as original bb. Therefore,
-			 * here, rather than switch to shadow bb's gma
-			 * address, we directly use original batch buffer's
-			 * gma address, and send original bb to hardware
-			 * directly
-			 */
-			if (bb->clflush & CLFLUSH_AFTER) {
-				drm_clflush_virt_range(bb->va,
-						bb->obj->base.size);
-				bb->clflush &= ~CLFLUSH_AFTER;
-			}
-			i915_gem_object_finish_access(bb->obj);
-			bb->accessing = false;
-
-		} else {
+		/*
+		 * For non-priv bb, scan&shadow is only for
+		 * debugging purpose, so the content of shadow bb
+		 * is the same as original bb. Therefore,
+		 * here, rather than switch to shadow bb's gma
+		 * address, we directly use original batch buffer's
+		 * gma address, and send original bb to hardware
+		 * directly
+		 */
+		if (!bb->ppgtt) {
 			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
 							   NULL, 0, 0, 0);
 			if (IS_ERR(bb->vma)) {
 				ret = PTR_ERR(bb->vma);
 				goto err;
@@ -535,27 +527,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			if (gmadr_bytes == 8)
 				bb->bb_start_cmd_va[2] = 0;
 
-			/* No one is going to touch shadow bb from now on. */
-			if (bb->clflush & CLFLUSH_AFTER) {
-				drm_clflush_virt_range(bb->va,
-						bb->obj->base.size);
-				bb->clflush &= ~CLFLUSH_AFTER;
-			}
-
-			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
-								false);
-			if (ret)
-				goto err;
-
 			ret = i915_vma_move_to_active(bb->vma,
 						      workload->req,
 						      0);
 			if (ret)
 				goto err;
-
-			i915_gem_object_finish_access(bb->obj);
-			bb->accessing = false;
 		}
+
+		/* No one is going to touch shadow bb from now on. */
+		i915_gem_object_flush_map(bb->obj);
 	}
 	return 0;
 err:
@@ -626,9 +606,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
 		if (bb->obj) {
-			if (bb->accessing)
-				i915_gem_object_finish_access(bb->obj);
-
 			if (bb->va && !IS_ERR(bb->va))
 				i915_gem_object_unpin_map(bb->obj);
 
drivers/gpu/drm/i915/gvt/scheduler.h
@@ -124,8 +124,6 @@ struct intel_vgpu_shadow_bb {
 	struct i915_vma *vma;
 	void *va;
 	u32 *bb_start_cmd_va;
-	unsigned int clflush;
-	bool accessing;
 	unsigned long bb_offset;
 	bool ppgtt;
 };