Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)
drm/i915/gvt: Drop redundant prepare_write/pin_pages
Since gvt calls pin_map for the shadow batch buffer, this makes the action of prepare_write [+pin_pages] redundant. We can write into the obj->mm.mapping directly and the flush_map routine knows when it has to flush the cpu cache afterwards.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200619234543.17499-1-chris@chris-wilson.co.uk
parent 4fb3395343
commit 033ef711bb
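What the redundancy looks like in practice: i915_gem_object_pin_map() already pins the backing pages and caches a kernel mapping in obj->mm.mapping, so the earlier i915_gem_object_prepare_write() call (which pins the pages a second time and computes manual clflush requirements) buys nothing. A condensed before/after sketch, with error handling and gvt specifics trimmed (illustrative only, not the verbatim driver code):

	/* before: double pin plus hand-rolled cache maintenance */
	i915_gem_object_prepare_write(obj, &clflush);	/* pin_pages + clflush analysis */
	va = i915_gem_object_pin_map(obj, I915_MAP_WB);	/* pins again, returns CPU map */
	if (clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(va, obj->base.size);
	/* ... write shadow commands through va ... */
	if (clflush & CLFLUSH_AFTER)
		drm_clflush_virt_range(va, obj->base.size);
	i915_gem_object_finish_access(obj);

	/* after: the mapping layer owns coherency; flush only when needed */
	va = i915_gem_object_pin_map(obj, I915_MAP_WB);
	/* ... write shadow commands through va ... */
	i915_gem_object_flush_map(obj);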
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1904,19 +1904,10 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 		goto err_free_bb;
 	}
 
-	ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
-	if (ret)
-		goto err_free_obj;
-
 	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
 	if (IS_ERR(bb->va)) {
 		ret = PTR_ERR(bb->va);
-		goto err_finish_shmem_access;
-	}
-
-	if (bb->clflush & CLFLUSH_BEFORE) {
-		drm_clflush_virt_range(bb->va, bb->obj->base.size);
-		bb->clflush &= ~CLFLUSH_BEFORE;
+		goto err_free_obj;
 	}
 
 	ret = copy_gma_to_hva(s->vgpu, mm,
@@ -1935,7 +1926,6 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	INIT_LIST_HEAD(&bb->list);
 	list_add(&bb->list, &s->workload->shadow_bb);
 
-	bb->accessing = true;
 	bb->bb_start_cmd_va = s->ip_va;
 
 	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
@@ -1956,8 +1946,6 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	return 0;
 err_unmap:
 	i915_gem_object_unpin_map(bb->obj);
-err_finish_shmem_access:
-	i915_gem_object_finish_access(bb->obj);
err_free_obj:
 	i915_gem_object_put(bb->obj);
 err_free_bb:
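With prepare_write gone there is no finish_access to undo, so the err_finish_shmem_access unwind label disappears and a pin_map failure falls straight through to err_free_obj. The life cycle the parser now relies on is the standard pin_map pairing; a minimal sketch of that idiom (shadow_fill is a hypothetical helper for illustration, not a function in this patch):

	/* assumes a shmem-backed GEM object in kernel context */
	static int shadow_fill(struct drm_i915_gem_object *obj,
			       const void *src, size_t len)
	{
		void *va;

		va = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(va))
			return PTR_ERR(va);	/* nothing pinned, nothing to undo */

		memcpy(va, src, len);		/* write through obj->mm.mapping */
		i915_gem_object_flush_map(obj);	/* flushes the CPU cache only if the map is incoherent */
		i915_gem_object_unpin_map(obj);
		return 0;
	}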
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -505,26 +505,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
 				+ bb->bb_offset;
 
-		if (bb->ppgtt) {
-			/* for non-priv bb, scan&shadow is only for
-			 * debugging purpose, so the content of shadow bb
-			 * is the same as original bb. Therefore,
-			 * here, rather than switch to shadow bb's gma
-			 * address, we directly use original batch buffer's
-			 * gma address, and send original bb to hardware
-			 * directly
-			 */
-			if (bb->clflush & CLFLUSH_AFTER) {
-				drm_clflush_virt_range(bb->va,
-						bb->obj->base.size);
-				bb->clflush &= ~CLFLUSH_AFTER;
-			}
-			i915_gem_object_finish_access(bb->obj);
-			bb->accessing = false;
-
-		} else {
+		/*
+		 * For non-priv bb, scan&shadow is only for
+		 * debugging purpose, so the content of shadow bb
+		 * is the same as original bb. Therefore,
+		 * here, rather than switch to shadow bb's gma
+		 * address, we directly use original batch buffer's
+		 * gma address, and send original bb to hardware
+		 * directly
+		 */
+		if (!bb->ppgtt) {
 			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
-					NULL, 0, 0, 0);
+							   NULL, 0, 0, 0);
 			if (IS_ERR(bb->vma)) {
 				ret = PTR_ERR(bb->vma);
 				goto err;
@@ -535,27 +527,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			if (gmadr_bytes == 8)
 				bb->bb_start_cmd_va[2] = 0;
 
-			/* No one is going to touch shadow bb from now on. */
-			if (bb->clflush & CLFLUSH_AFTER) {
-				drm_clflush_virt_range(bb->va,
-						bb->obj->base.size);
-				bb->clflush &= ~CLFLUSH_AFTER;
-			}
-
-			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
-								false);
-			if (ret)
-				goto err;
-
 			ret = i915_vma_move_to_active(bb->vma,
 						      workload->req,
 						      0);
 			if (ret)
 				goto err;
-
-			i915_gem_object_finish_access(bb->obj);
-			bb->accessing = false;
 		}
+
+		/* No one is going to touch shadow bb from now on. */
+		i915_gem_object_flush_map(bb->obj);
 	}
 	return 0;
 err:
@@ -626,9 +606,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
 		if (bb->obj) {
-			if (bb->accessing)
-				i915_gem_object_finish_access(bb->obj);
-
 			if (bb->va && !IS_ERR(bb->va))
 				i915_gem_object_unpin_map(bb->obj);
 
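The scheduler side shows the same collapse from two call sites to one: both the ppgtt path and the GGTT-pinned path used to end with a conditional CLFLUSH_AFTER range flush (plus an extra i915_gem_object_set_to_gtt_domain() on the pinned path), and all of that becomes the single flush_map call after the if block. Roughly, per shadow bb:

	/* old, duplicated in both branches */
	if (bb->clflush & CLFLUSH_AFTER) {
		drm_clflush_virt_range(bb->va, bb->obj->base.size);
		bb->clflush &= ~CLFLUSH_AFTER;
	}

	/* new, once, branch-independent */
	i915_gem_object_flush_map(bb->obj);

In release_shadow_batch_buffer() the bb->accessing bookkeeping goes with it: a valid bb->va is now the only signal that a mapping remains to be dropped with i915_gem_object_unpin_map().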
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -124,8 +124,6 @@ struct intel_vgpu_shadow_bb {
 	struct i915_vma *vma;
 	void *va;
 	u32 *bb_start_cmd_va;
-	unsigned int clflush;
-	bool accessing;
 	unsigned long bb_offset;
 	bool ppgtt;
 };
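For reference, the struct after the patch, reconstructed from this hunk; the two leading members sit above the hunk context and are assumed from the surrounding file rather than shown in this diff:

	struct intel_vgpu_shadow_bb {
		struct list_head list;		/* assumed: link on workload->shadow_bb */
		struct drm_i915_gem_object *obj;/* assumed: backing shadow object */
		struct i915_vma *vma;
		void *va;
		u32 *bb_start_cmd_va;
		unsigned long bb_offset;
		bool ppgtt;
	};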