drm/amd/display: Clean up locking in dcn*_apply_ctx_for_surface()

[Why]

dcn*_disable_plane() doesn't unlock the pipe anymore, making the extra
lock unnecessary.

In addition - during full plane updates - all necessary pipes should be
locked/unlocked together when modifying hubp to avoid tearing in
pipesplit setups.

[How]

Remove redundant locks, and add function to lock all pipes. If an
interdependent pipe update is required, lock down all pipes. Otherwise,
lock only the top pipe for the updated pipe tree.

Signed-off-by: Leo Li <sunpeng.li@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Leo Li 2019-03-20 09:52:14 -04:00 committed by Alex Deucher
parent 113b7a0108
commit c7e557ab46
2 changed files with 49 additions and 21 deletions

View File

@ -2329,6 +2329,7 @@ static void dcn10_apply_ctx_for_surface(
int i; int i;
struct timing_generator *tg; struct timing_generator *tg;
bool removed_pipe[4] = { false }; bool removed_pipe[4] = { false };
bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program = struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream); find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger); DC_LOGGER_INIT(dc->ctx->logger);
@ -2338,6 +2339,12 @@ static void dcn10_apply_ctx_for_surface(
tg = top_pipe_to_program->stream_res.tg; tg = top_pipe_to_program->stream_res.tg;
interdependent_update = top_pipe_to_program->plane_state &&
top_pipe_to_program->plane_state->update_flags.bits.full_update;
if (interdependent_update)
lock_all_pipes(dc, context, true);
else
dcn10_pipe_control_lock(dc, top_pipe_to_program, true); dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (num_planes == 0) { if (num_planes == 0) {
@ -2359,14 +2366,8 @@ static void dcn10_apply_ctx_for_surface(
if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) { if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
if (old_pipe_ctx->stream_res.tg == tg && if (old_pipe_ctx->stream_res.tg == tg &&
old_pipe_ctx->plane_res.hubp && old_pipe_ctx->plane_res.hubp &&
old_pipe_ctx->plane_res.hubp->opp_id != 0xf) { old_pipe_ctx->plane_res.hubp->opp_id != 0xf)
dcn10_disable_plane(dc, old_pipe_ctx); dcn10_disable_plane(dc, old_pipe_ctx);
/*
* power down fe will unlock when calling reset, need
* to lock it back here. Messy, need rework.
*/
pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
}
} }
if ((!pipe_ctx->plane_state || if ((!pipe_ctx->plane_state ||
@ -2385,29 +2386,25 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes > 0) if (num_planes > 0)
program_all_pipe_in_tree(dc, top_pipe_to_program, context); program_all_pipe_in_tree(dc, top_pipe_to_program, context);
dcn10_pipe_control_lock(dc, top_pipe_to_program, false); if (interdependent_update)
if (top_pipe_to_program->plane_state &&
top_pipe_to_program->plane_state->update_flags.bits.full_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) { for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
/* Skip inactive pipes and ones already updated */ /* Skip inactive pipes and ones already updated */
if (!pipe_ctx->stream || pipe_ctx->stream == stream if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
|| !pipe_ctx->plane_state !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
|| !tg->funcs->is_tg_enabled(tg))
continue; continue;
tg->funcs->lock(tg);
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp, pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs, &pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs); &pipe_ctx->ttu_regs);
tg->funcs->unlock(tg);
} }
if (interdependent_update)
lock_all_pipes(dc, context, false);
else
dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
if (num_planes == 0) if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg); false_optc_underflow_wa(dc, stream, tg);
@ -2814,6 +2811,33 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start; return vertical_line_start;
} }
/**
 * lock_all_pipes - Lock or unlock the timing generator of every active
 * pipe tree in the given state.
 * @dc:      display core instance; supplies the resource pool pipe count.
 * @context: dc_state whose res_ctx.pipe_ctx array is walked.
 * @lock:    true to lock each tg, false to unlock it.
 *
 * Only the top pipe of each tree is touched: locking the top pipe's tg
 * is sufficient for the whole tree, so bottom (split) pipes are skipped
 * to avoid redundant (un)locking. Pipes with no stream or plane state,
 * or whose tg is not enabled, are skipped as well.
 */
void lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	struct pipe_ctx *pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;
		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
		    !tg->funcs->is_tg_enabled(tg))
			continue;

		if (lock)
			tg->funcs->lock(tg);
		else
			tg->funcs->unlock(tg);
	}
}
static void calc_vupdate_position( static void calc_vupdate_position(
struct pipe_ctx *pipe_ctx, struct pipe_ctx *pipe_ctx,
uint32_t *start_line, uint32_t *start_line,

View File

@ -83,4 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream(
int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
void lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock);
#endif /* __DC_HWSS_DCN10_H__ */ #endif /* __DC_HWSS_DCN10_H__ */