Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2025-04-14 21:27:32 +07:00)
drm/amdgpu: move IB and frame size directly into the engine description
I should have suggested that on the initial patchset. This saves us a few CPU cycles during CS and a bunch of loc.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7bc6be825a
commit e12f3d7a23
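To illustrate the shape of the change before reading the diff, here is a minimal, self-contained C sketch (simplified stand-in types, not the kernel code itself; only the emit_frame_size/emit_ib_size values are copied from the cik_sdma hunk below): the per-ring callbacks get_emit_ib_size()/get_dma_frame_size() become constant fields in the engine description, so the command-submission path does two loads and a multiply instead of two indirect calls.

#include <stdio.h>

/* Stand-in for amdgpu_ring_funcs: the worst-case DW counts are now
 * plain constants in the engine description instead of callbacks. */
struct ring_funcs {
	unsigned emit_frame_size;	/* fixed per-frame overhead, in DWs */
	unsigned emit_ib_size;		/* DWs emitted per indirect buffer */
};

/* Values taken from the cik_sdma hunk in this commit. */
static const struct ring_funcs cik_sdma_funcs = {
	.emit_frame_size =
		6 +		/* hdp_flush */
		3 +		/* hdp_invalidate */
		6 +		/* pipeline_sync */
		12 +		/* vm_flush */
		9 + 9 + 9,	/* fence x3 */
	.emit_ib_size = 7 + 4,
};

/* Mirrors the new alloc_size computation in amdgpu_ib_schedule(). */
static unsigned ring_alloc_size(const struct ring_funcs *f, unsigned num_ibs)
{
	return f->emit_frame_size + num_ibs * f->emit_ib_size;
}

int main(void)
{
	/* e.g. scheduling two IBs on the CIK SDMA ring */
	printf("%u DWs\n", ring_alloc_size(&cik_sdma_funcs, 2));
	return 0;
}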
@@ -1962,8 +1962,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return -EINVAL;
 	}
 
-	alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
-		num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+	alloc_size = ring->funcs->emit_frame_size + num_ibs *
+		ring->funcs->emit_ib_size;
 
 	r = amdgpu_ring_alloc(ring, alloc_size);
 	if (r) {
@@ -98,6 +98,9 @@ struct amdgpu_ring_funcs {
 	void (*set_wptr)(struct amdgpu_ring *ring);
 	/* validating and patching of IBs */
 	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+	/* constants to calculate how many DW are needed for an emit */
+	unsigned emit_frame_size;
+	unsigned emit_ib_size;
 	/* command emit functions */
 	void (*emit_ib)(struct amdgpu_ring *ring,
 			struct amdgpu_ib *ib,
@@ -127,8 +130,6 @@ struct amdgpu_ring_funcs {
 	void (*end_use)(struct amdgpu_ring *ring);
 	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
 	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
-	unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
-	unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
 };
 
 struct amdgpu_ring {
@@ -824,18 +824,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 	amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
-unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		4; /* amdgpu_vce_ring_emit_ib */
-}
-
-unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
 /**
  * amdgpu_vce_ring_test_ring - test if VCE ring is working
  *
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
-static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 4; /* cik_sdma_ring_emit_ib */
-}
-
-static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* cik_sdma_ring_emit_hdp_flush */
-		3 + /* cik_sdma_ring_emit_hdp_invalidate */
-		6 + /* cik_sdma_ring_emit_pipeline_sync */
-		12 + /* cik_sdma_ring_emit_vm_flush */
-		9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 				 bool enable)
 {
@@ -1228,6 +1212,13 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.get_rptr = cik_sdma_ring_get_rptr,
 	.get_wptr = cik_sdma_ring_get_wptr,
 	.set_wptr = cik_sdma_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* cik_sdma_ring_emit_hdp_flush */
+		3 + /* cik_sdma_ring_emit_hdp_invalidate */
+		6 + /* cik_sdma_ring_emit_pipeline_sync */
+		12 + /* cik_sdma_ring_emit_vm_flush */
+		9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
 	.emit_ib = cik_sdma_ring_emit_ib,
 	.emit_fence = cik_sdma_ring_emit_fence,
 	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
@@ -1238,8 +1229,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.test_ib = cik_sdma_ring_test_ib,
 	.insert_nop = cik_sdma_ring_insert_nop,
 	.pad_ib = cik_sdma_ring_pad_ib,
-	.get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
-	.get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -2814,33 +2814,6 @@ static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 	amdgpu_ring_write(ring, 0);
 }
 
-static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* gfx_v6_0_ring_emit_ib */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		5 + /* gfx_v6_0_ring_emit_hdp_flush */
-		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
-		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
-		3; /* gfx_v6_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		5 + /* gfx_v6_0_ring_emit_hdp_flush */
-		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v6_0_ring_emit_vm_flush */
-		14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v6_0_select_se_sh,
@@ -3258,6 +3231,14 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
 	.get_rptr = gfx_v6_0_ring_get_rptr,
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
+	.emit_frame_size =
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+		3, /* gfx_v6_ring_emit_cntxcntl */
+	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3268,14 +3249,19 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
 	.test_ib = gfx_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
 	.get_rptr = gfx_v6_0_ring_get_rptr,
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_compute,
+	.emit_frame_size =
+		5 + /* gfx_v6_0_ring_emit_hdp_flush */
+		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v6_0_ring_emit_vm_flush */
+		14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
@@ -3285,8 +3271,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
 	.test_ring = gfx_v6_0_ring_test_ring,
 	.test_ib = gfx_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
-	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -4357,41 +4357,6 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
-static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v7_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v7_0_ring_emit_gds_switch */
-		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
-		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
-		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-		3; /* gfx_v7_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v7_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v7_0_ring_emit_gds_switch */
-		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v7_0_ring_emit_vm_flush */
-		7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
 	.select_se_sh = &gfx_v7_0_select_se_sh,
@@ -5147,6 +5112,15 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
+	.emit_frame_size =
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+		3, /* gfx_v7_ring_emit_cntxcntl */
+	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5159,14 +5133,20 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
-	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.get_rptr = gfx_v7_0_ring_get_rptr,
 	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
+	.emit_frame_size =
+		20 + /* gfx_v7_0_ring_emit_gds_switch */
+		7 + /* gfx_v7_0_ring_emit_hdp_flush */
+		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v7_0_ring_emit_vm_flush */
+		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
@@ -5178,8 +5158,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
-	.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6363,42 +6363,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 	amdgpu_ring_write(ring, 0);
 }
 
-static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v8_0_ring_emit_ib_gfx */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v8_0_ring_emit_gds_switch */
-		7 + /* gfx_v8_0_ring_emit_hdp_flush */
-		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
-		6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
-		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-		128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
-		2 + /* gfx_v8_ring_emit_sb */
-		3; /* gfx_v8_ring_emit_cntxcntl */
-}
-
-static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		4; /* gfx_v8_0_ring_emit_ib_compute */
-}
-
-static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
-{
-	return
-		20 + /* gfx_v8_0_ring_emit_gds_switch */
-		7 + /* gfx_v8_0_ring_emit_hdp_flush */
-		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
-		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v8_0_ring_emit_vm_flush */
-		7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
-}
-
 static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						 enum amdgpu_interrupt_state state)
 {
@@ -6568,6 +6532,16 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
 	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
+	.emit_frame_size =
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+		2 + /* gfx_v8_ring_emit_sb */
+		3, /* gfx_v8_ring_emit_cntxcntl */
+	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
 	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6581,14 +6555,20 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_switch_buffer = gfx_v8_ring_emit_sb,
 	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
-	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
-	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.get_rptr = gfx_v8_0_ring_get_rptr,
 	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
 	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
+	.emit_frame_size =
+		20 + /* gfx_v8_0_ring_emit_gds_switch */
+		7 + /* gfx_v8_0_ring_emit_hdp_flush */
+		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+		17 + /* gfx_v8_0_ring_emit_vm_flush */
+		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
 	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
@@ -6600,8 +6580,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
-	.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
-static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 6; /* sdma_v2_4_ring_emit_ib */
-}
-
-static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* sdma_v2_4_ring_emit_hdp_flush */
-		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
-		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
-		12 + /* sdma_v2_4_ring_emit_vm_flush */
-		10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int sdma_v2_4_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1225,6 +1209,13 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 	.get_rptr = sdma_v2_4_ring_get_rptr,
 	.get_wptr = sdma_v2_4_ring_get_wptr,
 	.set_wptr = sdma_v2_4_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* sdma_v2_4_ring_emit_hdp_flush */
+		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+		12 + /* sdma_v2_4_ring_emit_vm_flush */
+		10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
 	.emit_ib = sdma_v2_4_ring_emit_ib,
 	.emit_fence = sdma_v2_4_ring_emit_fence,
 	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
@@ -1235,8 +1226,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.insert_nop = sdma_v2_4_ring_insert_nop,
 	.pad_ib = sdma_v2_4_ring_pad_ib,
-	.get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
-	.get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
-static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 6; /* sdma_v3_0_ring_emit_ib */
-}
-
-static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* sdma_v3_0_ring_emit_hdp_flush */
-		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
-		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
-		12 + /* sdma_v3_0_ring_emit_vm_flush */
-		10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int sdma_v3_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1568,6 +1552,13 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 	.get_rptr = sdma_v3_0_ring_get_rptr,
 	.get_wptr = sdma_v3_0_ring_get_wptr,
 	.set_wptr = sdma_v3_0_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* sdma_v3_0_ring_emit_hdp_flush */
+		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+		12 + /* sdma_v3_0_ring_emit_vm_flush */
+		10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
 	.emit_ib = sdma_v3_0_ring_emit_ib,
 	.emit_fence = sdma_v3_0_ring_emit_fence,
 	.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
@@ -1578,8 +1569,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 	.test_ib = sdma_v3_0_ring_test_ib,
 	.insert_nop = sdma_v3_0_ring_insert_nop,
 	.pad_ib = sdma_v3_0_ring_pad_ib,
-	.get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
 
-static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		7 + 3; /* si_dma_ring_emit_ib */
-}
-
-static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		3 + /* si_dma_ring_emit_hdp_flush */
-		3 + /* si_dma_ring_emit_hdp_invalidate */
-		6 + /* si_dma_ring_emit_pipeline_sync */
-		12 + /* si_dma_ring_emit_vm_flush */
-		9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
-}
-
 static int si_dma_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -783,6 +767,13 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
 	.get_rptr = si_dma_ring_get_rptr,
 	.get_wptr = si_dma_ring_get_wptr,
 	.set_wptr = si_dma_ring_set_wptr,
+	.emit_frame_size =
+		3 + /* si_dma_ring_emit_hdp_flush */
+		3 + /* si_dma_ring_emit_hdp_invalidate */
+		6 + /* si_dma_ring_emit_pipeline_sync */
+		12 + /* si_dma_ring_emit_vm_flush */
+		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
 	.emit_ib = si_dma_ring_emit_ib,
 	.emit_fence = si_dma_ring_emit_fence,
 	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
@@ -793,8 +784,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
 	.test_ib = si_dma_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = si_dma_ring_pad_ib,
-	.get_emit_ib_size = si_dma_ring_get_emit_ib_size,
-	.get_dma_frame_size = si_dma_ring_get_dma_frame_size,
 };
 
 static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -526,20 +526,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
-static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		4; /* uvd_v4_2_ring_emit_ib */
-}
-
-static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v4_2_ring_emit_hdp_flush */
-		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
-		14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
-}
-
 /**
  * uvd_v4_2_mc_resume - memory controller programming
  *
@@ -760,6 +746,11 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.get_wptr = uvd_v4_2_ring_get_wptr,
 	.set_wptr = uvd_v4_2_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v4_2_ring_emit_hdp_flush */
+		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
 	.emit_ib = uvd_v4_2_ring_emit_ib,
 	.emit_fence = uvd_v4_2_ring_emit_fence,
 	.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
@@ -770,8 +761,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -577,20 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
-static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		6; /* uvd_v5_0_ring_emit_ib */
-}
-
-static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v5_0_ring_emit_hdp_flush */
-		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
-		14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
-}
-
 static bool uvd_v5_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -811,6 +797,11 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.get_wptr = uvd_v5_0_ring_get_wptr,
 	.set_wptr = uvd_v5_0_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v5_0_ring_emit_hdp_flush */
+		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
 	.emit_ib = uvd_v5_0_ring_emit_ib,
 	.emit_fence = uvd_v5_0_ring_emit_fence,
 	.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
@@ -821,8 +812,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -725,31 +725,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 0xE);
 }
 
-static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		8; /* uvd_v6_0_ring_emit_ib */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v6_0_ring_emit_hdp_flush */
-		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
-		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
-		14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
-}
-
-static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
-	return
-		2 + /* uvd_v6_0_ring_emit_hdp_flush */
-		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
-		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
-		20 + /* uvd_v6_0_ring_emit_vm_flush */
-		14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
-}
-
 static bool uvd_v6_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1052,6 +1027,12 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
 	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
@@ -1062,14 +1043,19 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 	.get_rptr = uvd_v6_0_ring_get_rptr,
 	.get_wptr = uvd_v6_0_ring_get_wptr,
 	.set_wptr = uvd_v6_0_ring_set_wptr,
+	.emit_frame_size =
+		2 + /* uvd_v6_0_ring_emit_hdp_flush */
+		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+		20 + /* uvd_v6_0_ring_emit_vm_flush */
+		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
+	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
 	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
@@ -1082,8 +1068,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
-	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -614,6 +614,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
 	.get_wptr = vce_v2_0_ring_get_wptr,
 	.set_wptr = vce_v2_0_ring_set_wptr,
 	.parse_cs = amdgpu_vce_ring_parse_cs,
+	.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
 	.test_ring = amdgpu_vce_ring_test_ring,
@@ -622,8 +624,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
-	.get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -808,27 +808,6 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, seq);
 }
 
-static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
-{
-	return
-		5; /* vce_v3_0_ring_emit_ib */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
-{
-	return
-		4 + /* vce_v3_0_emit_pipeline_sync */
-		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
-}
-
-static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
-{
-	return
-		6 + /* vce_v3_0_emit_vm_flush */
-		4 + /* vce_v3_0_emit_pipeline_sync */
-		6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
-}
-
 const struct amd_ip_funcs vce_v3_0_ip_funcs = {
 	.name = "vce_v3_0",
 	.early_init = vce_v3_0_early_init,
@@ -854,6 +833,10 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
 	.parse_cs = amdgpu_vce_ring_parse_cs,
+	.emit_frame_size =
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
 	.test_ring = amdgpu_vce_ring_test_ring,
@@ -862,14 +845,17 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
 	.get_rptr = vce_v3_0_ring_get_rptr,
 	.get_wptr = vce_v3_0_ring_get_wptr,
 	.set_wptr = vce_v3_0_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* vce_v3_0_emit_vm_flush */
+		4 + /* vce_v3_0_emit_pipeline_sync */
+		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
 	.emit_ib = vce_v3_0_ring_emit_ib,
 	.emit_vm_flush = vce_v3_0_emit_vm_flush,
 	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -880,8 +866,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
-	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
-	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)