commit 688486a49c

Merge tag 'amd-drm-next-5.6-2020-01-10-dp-mst-dsc' of git://people.freedesktop.org/~agd5f/linux into drm-next

amd-drm-next-5.6-2020-01-10-dp-mst-dsc:

drm:
- Add MST helper for PBN calculation of DSC modes
- Parse FEC caps on MST ports
- Add MST DPCD R/W functions
- Add MST helpers for virtual DPCD aux
- Add MST HUB quirk
- Add MST DSC enablement helpers

amdgpu:
- Enable MST DSC
- Add fair share algo for DSC bandwidth calcs
- Fix for 32 bit builds

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200110214328.308549-1-alexander.deucher@amd.com
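Taken together, the helpers changed below alter how an atomic driver reserves MST bandwidth for a compressed stream. The sketch that follows is illustrative only (the function name and parameters are placeholders, not part of the patch); it simply strings the new entry points together the way amdgpu does further down in this diff.

/*
 * Illustrative only: sizing a DSC stream with the updated MST helpers.
 * "mgr", "port", "clock_khz", "bpp_x16" and "pbn_div" are placeholders.
 */
#include <drm/drm_dp_mst_helper.h>

static int example_reserve_dsc_bandwidth(struct drm_atomic_state *state,
					  struct drm_dp_mst_topology_mgr *mgr,
					  struct drm_dp_mst_port *port,
					  int clock_khz, int bpp_x16, int pbn_div)
{
	int pbn, slots;

	/* With dsc=true, bpp is interpreted in units of 1/16 bit per pixel */
	pbn = drm_dp_calc_pbn_mode(clock_khz, bpp_x16, true);

	/* pbn_div lets the caller account for FEC overhead on DSC links */
	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn, pbn_div);
	if (slots < 0)
		return slots;

	/* Record the DSC decision in the MST atomic state */
	return drm_dp_mst_atomic_enable_dsc(state, port, pbn, pbn_div, true);
}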
@@ -4933,12 +4933,13 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
							is_y420);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp);
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn);
									   dm_new_connector_state->pbn,
									   0);
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
@ -4951,6 +4952,71 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
|
||||
.atomic_check = dm_encoder_helper_atomic_check
|
||||
};
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
|
||||
struct dc_state *dc_state)
|
||||
{
|
||||
struct dc_stream_state *stream = NULL;
|
||||
struct drm_connector *connector;
|
||||
struct drm_connector_state *new_con_state, *old_con_state;
|
||||
struct amdgpu_dm_connector *aconnector;
|
||||
struct dm_connector_state *dm_conn_state;
|
||||
int i, j, clock, bpp;
|
||||
int vcpi, pbn_div, pbn = 0;
|
||||
|
||||
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
|
||||
|
||||
aconnector = to_amdgpu_dm_connector(connector);
|
||||
|
||||
if (!aconnector->port)
|
||||
continue;
|
||||
|
||||
if (!new_con_state || !new_con_state->crtc)
|
||||
continue;
|
||||
|
||||
dm_conn_state = to_dm_connector_state(new_con_state);
|
||||
|
||||
for (j = 0; j < dc_state->stream_count; j++) {
|
||||
stream = dc_state->streams[j];
|
||||
if (!stream)
|
||||
continue;
|
||||
|
||||
if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
|
||||
break;
|
||||
|
||||
stream = NULL;
|
||||
}
|
||||
|
||||
if (!stream)
|
||||
continue;
|
||||
|
||||
if (stream->timing.flags.DSC != 1) {
|
||||
drm_dp_mst_atomic_enable_dsc(state,
|
||||
aconnector->port,
|
||||
dm_conn_state->pbn,
|
||||
0,
|
||||
false);
|
||||
continue;
|
||||
}
|
||||
|
||||
pbn_div = dm_mst_get_pbn_divider(stream->link);
|
||||
bpp = stream->timing.dsc_cfg.bits_per_pixel;
|
||||
clock = stream->timing.pix_clk_100hz / 10;
|
||||
pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
|
||||
vcpi = drm_dp_mst_atomic_enable_dsc(state,
|
||||
aconnector->port,
|
||||
pbn, pbn_div,
|
||||
true);
|
||||
if (vcpi < 0)
|
||||
return vcpi;
|
||||
|
||||
dm_conn_state->pbn = pbn;
|
||||
dm_conn_state->vcpi_slots = vcpi;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#endif
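In the atomic-check path added later in this diff, this helper runs right after compute_mst_dsc_configs_for_state() has chosen per-stream DSC settings, so the VCPI allocations it records already reflect the compressed bandwidth before drm_dp_mst_atomic_check() and dc_validate_global_state() are invoked.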
|
||||
|
||||
static void dm_drm_plane_reset(struct drm_plane *plane)
|
||||
{
|
||||
struct dm_plane_state *amdgpu_state = NULL;
|
||||
@ -7829,6 +7895,29 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_connector *connector;
|
||||
struct drm_connector_state *conn_state;
|
||||
struct amdgpu_dm_connector *aconnector = NULL;
|
||||
int i;
|
||||
for_each_new_connector_in_state(state, connector, conn_state, i) {
|
||||
if (conn_state->crtc != crtc)
|
||||
continue;
|
||||
|
||||
aconnector = to_amdgpu_dm_connector(connector);
|
||||
if (!aconnector->port || !aconnector->mst_port)
|
||||
aconnector = NULL;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
if (!aconnector)
|
||||
return 0;
|
||||
|
||||
return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
|
||||
* @dev: The DRM device
|
||||
@ -7881,6 +7970,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
if (adev->asic_type >= CHIP_NAVI10) {
|
||||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
|
||||
ret = add_affected_mst_dsc_crtcs(state, crtc);
|
||||
if (ret)
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
|
||||
!new_crtc_state->color_mgmt_changed &&
|
||||
@ -7984,11 +8083,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
/* Perform validation of MST topology in the state*/
|
||||
ret = drm_dp_mst_atomic_check(state);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
if (state->legacy_cursor_update) {
|
||||
/*
|
||||
* This is a fast cursor update coming from the plane update
|
||||
@ -8057,6 +8151,15 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
|
||||
goto fail;
|
||||
|
||||
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
|
||||
if (ret)
|
||||
goto fail;
|
||||
#endif
|
||||
|
||||
if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
@ -8085,6 +8188,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
||||
dc_retain_state(old_dm_state->context);
|
||||
}
|
||||
}
|
||||
/* Perform validation of MST topology in the state*/
|
||||
ret = drm_dp_mst_atomic_check(state);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
/* Store the overall update type for use later in atomic check. */
|
||||
for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
|
||||
|
@ -330,6 +330,7 @@ struct amdgpu_dm_connector {
|
||||
struct drm_dp_mst_port *port;
|
||||
struct amdgpu_dm_connector *mst_port;
|
||||
struct amdgpu_encoder *mst_encoder;
|
||||
struct drm_dp_aux *dsc_aux;
|
||||
|
||||
/* TODO see if we can merge with ddc_bus or make a dm_connector */
|
||||
struct amdgpu_i2c_adapter *i2c;
|
||||
|
@ -37,6 +37,7 @@
|
||||
#include "dc.h"
|
||||
#include "amdgpu_dm.h"
|
||||
#include "amdgpu_dm_irq.h"
|
||||
#include "amdgpu_dm_mst_types.h"
|
||||
|
||||
#include "dm_helpers.h"
|
||||
|
||||
@ -516,8 +517,24 @@ bool dm_helpers_dp_write_dsc_enable(
|
||||
)
|
||||
{
|
||||
uint8_t enable_dsc = enable ? 1 : 0;
|
||||
struct amdgpu_dm_connector *aconnector;
|
||||
|
||||
return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1);
|
||||
if (!stream)
|
||||
return false;
|
||||
|
||||
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
|
||||
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
|
||||
|
||||
if (!aconnector->dsc_aux)
|
||||
return false;
|
||||
|
||||
return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
|
||||
}
|
||||
|
||||
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
|
||||
return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
|
||||
|
@ -25,6 +25,7 @@
|
||||
|
||||
#include <linux/version.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_dp_mst_helper.h>
|
||||
#include "dm_services.h"
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_dm.h"
|
||||
@ -39,6 +40,12 @@
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
#include "amdgpu_dm_debugfs.h"
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
#include "dc/dcn20/dcn20_resource.h"
|
||||
#endif
|
||||
|
||||
/* #define TRACE_DPCD */
|
||||
|
||||
#ifdef TRACE_DPCD
|
||||
@ -180,6 +187,30 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
|
||||
.early_unregister = amdgpu_dm_mst_connector_early_unregister,
|
||||
};
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
|
||||
{
|
||||
struct dc_sink *dc_sink = aconnector->dc_sink;
|
||||
struct drm_dp_mst_port *port = aconnector->port;
|
||||
u8 dsc_caps[16] = { 0 };
|
||||
|
||||
aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
|
||||
|
||||
if (!aconnector->dsc_aux)
|
||||
return false;
|
||||
|
||||
if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
|
||||
return false;
|
||||
|
||||
if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
|
||||
dsc_caps, NULL,
|
||||
&dc_sink->sink_dsc_caps.dsc_dec_caps))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int dm_dp_mst_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
|
||||
@ -222,10 +253,16 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
|
||||
/* dc_link_add_remote_sink returns a new reference */
|
||||
aconnector->dc_sink = dc_sink;
|
||||
|
||||
if (aconnector->dc_sink)
|
||||
if (aconnector->dc_sink) {
|
||||
amdgpu_dm_update_freesync_caps(
|
||||
connector, aconnector->edid);
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
if (!validate_dsc_caps_on_connector(aconnector))
|
||||
memset(&aconnector->dc_sink->sink_dsc_caps,
|
||||
0, sizeof(aconnector->dc_sink->sink_dsc_caps));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
drm_connector_update_edid_property(
|
||||
@ -466,3 +503,384 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
|
||||
aconnector->connector_id);
|
||||
}
|
||||
|
||||
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}
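A rough worked example (not from the patch): for a 4-lane HBR2 link, dc_link_bandwidth_kbps() reports about 4 × 5.4 Gbit/s × 8/10 = 17,280,000 kbit/s of payload bandwidth, so this helper returns 17,280,000 / (8 × 1000 × 54) = 40 PBN per time slot, which is the conventional MST pbn_div for that link configuration. This is a back-of-the-envelope illustration; the dc code may trim the figure once FEC overhead is taken into account.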
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
|
||||
struct dsc_mst_fairness_params {
|
||||
struct dc_crtc_timing *timing;
|
||||
struct dc_sink *sink;
|
||||
struct dc_dsc_bw_range bw_range;
|
||||
bool compression_possible;
|
||||
struct drm_dp_mst_port *port;
|
||||
};
|
||||
|
||||
struct dsc_mst_fairness_vars {
|
||||
int pbn;
|
||||
bool dsc_enabled;
|
||||
int bpp_x16;
|
||||
};
|
||||
|
||||
static int kbps_to_peak_pbn(int kbps)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps = div_u64(peak_kbps, 1000);
	return (int) DIV_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
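As an illustrative cross-check: a 154 MHz mode at 30 bpp needs 154,000 × 30 = 4,620,000 kbit/s; the 0.6 % margin raises that to 4,647,720 kbit/s, and 4,647,720 × 64 / (54 × 8 × 1000) rounds up to 689 PBN — the same value drm_dp_calc_pbn_mode(154000, 30, false) returns in the selftest near the end of this patch.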
|
||||
|
||||
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
|
||||
struct dsc_mst_fairness_vars *vars,
|
||||
int count)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
|
||||
if (vars[i].dsc_enabled && dc_dsc_compute_config(
|
||||
params[i].sink->ctx->dc->res_pool->dscs[0],
|
||||
¶ms[i].sink->sink_dsc_caps.dsc_dec_caps,
|
||||
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
|
||||
0,
|
||||
params[i].timing,
|
||||
¶ms[i].timing->dsc_cfg)) {
|
||||
params[i].timing->flags.DSC = 1;
|
||||
params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
|
||||
} else {
|
||||
params[i].timing->flags.DSC = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
|
||||
{
|
||||
struct dc_dsc_config dsc_config;
|
||||
u64 kbps;
|
||||
|
||||
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
|
||||
dc_dsc_compute_config(
|
||||
param.sink->ctx->dc->res_pool->dscs[0],
|
||||
¶m.sink->sink_dsc_caps.dsc_dec_caps,
|
||||
param.sink->ctx->dc->debug.dsc_min_slice_height_override,
|
||||
(int) kbps, param.timing, &dsc_config);
|
||||
|
||||
return dsc_config.bits_per_pixel;
|
||||
}
|
||||
|
||||
static void increase_dsc_bpp(struct drm_atomic_state *state,
|
||||
struct dc_link *dc_link,
|
||||
struct dsc_mst_fairness_params *params,
|
||||
struct dsc_mst_fairness_vars *vars,
|
||||
int count)
|
||||
{
|
||||
int i;
|
||||
bool bpp_increased[MAX_PIPES];
|
||||
int initial_slack[MAX_PIPES];
|
||||
int min_initial_slack;
|
||||
int next_index;
|
||||
int remaining_to_increase = 0;
|
||||
int pbn_per_timeslot;
|
||||
int link_timeslots_used;
|
||||
int fair_pbn_alloc;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
if (vars[i].dsc_enabled) {
|
||||
initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
|
||||
bpp_increased[i] = false;
|
||||
remaining_to_increase += 1;
|
||||
} else {
|
||||
initial_slack[i] = 0;
|
||||
bpp_increased[i] = true;
|
||||
}
|
||||
}
|
||||
|
||||
pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
|
||||
dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);
|
||||
|
||||
while (remaining_to_increase) {
|
||||
next_index = -1;
|
||||
min_initial_slack = -1;
|
||||
for (i = 0; i < count; i++) {
|
||||
if (!bpp_increased[i]) {
|
||||
if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
|
||||
min_initial_slack = initial_slack[i];
|
||||
next_index = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (next_index == -1)
|
||||
break;
|
||||
|
||||
link_timeslots_used = 0;
|
||||
|
||||
for (i = 0; i < count; i++)
|
||||
link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);
|
||||
|
||||
fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
|
||||
|
||||
if (initial_slack[next_index] > fair_pbn_alloc) {
|
||||
vars[next_index].pbn += fair_pbn_alloc;
|
||||
if (drm_dp_atomic_find_vcpi_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
vars[next_index].pbn,\
|
||||
dm_mst_get_pbn_divider(dc_link)) < 0)
|
||||
return;
|
||||
if (!drm_dp_mst_atomic_check(state)) {
|
||||
vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
|
||||
} else {
|
||||
vars[next_index].pbn -= fair_pbn_alloc;
|
||||
if (drm_dp_atomic_find_vcpi_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
vars[next_index].pbn,
|
||||
dm_mst_get_pbn_divider(dc_link)) < 0)
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
vars[next_index].pbn += initial_slack[next_index];
|
||||
if (drm_dp_atomic_find_vcpi_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
vars[next_index].pbn,
|
||||
dm_mst_get_pbn_divider(dc_link)) < 0)
|
||||
return;
|
||||
if (!drm_dp_mst_atomic_check(state)) {
|
||||
vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
|
||||
} else {
|
||||
vars[next_index].pbn -= initial_slack[next_index];
|
||||
if (drm_dp_atomic_find_vcpi_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
vars[next_index].pbn,
|
||||
dm_mst_get_pbn_divider(dc_link)) < 0)
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
bpp_increased[next_index] = true;
|
||||
remaining_to_increase--;
|
||||
}
|
||||
}
|
||||
|
||||
static void try_disable_dsc(struct drm_atomic_state *state,
|
||||
struct dc_link *dc_link,
|
||||
struct dsc_mst_fairness_params *params,
|
||||
struct dsc_mst_fairness_vars *vars,
|
||||
int count)
|
||||
{
|
||||
int i;
|
||||
bool tried[MAX_PIPES];
|
||||
int kbps_increase[MAX_PIPES];
|
||||
int max_kbps_increase;
|
||||
int next_index;
|
||||
int remaining_to_try = 0;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
|
||||
kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
|
||||
tried[i] = false;
|
||||
remaining_to_try += 1;
|
||||
} else {
|
||||
kbps_increase[i] = 0;
|
||||
tried[i] = true;
|
||||
}
|
||||
}
|
||||
|
||||
while (remaining_to_try) {
|
||||
next_index = -1;
|
||||
max_kbps_increase = -1;
|
||||
for (i = 0; i < count; i++) {
|
||||
if (!tried[i]) {
|
||||
if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
|
||||
max_kbps_increase = kbps_increase[i];
|
||||
next_index = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (next_index == -1)
|
||||
break;
|
||||
|
||||
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
|
||||
if (drm_dp_atomic_find_vcpi_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
vars[next_index].pbn,
|
||||
0) < 0)
|
||||
return;
|
||||
|
||||
if (!drm_dp_mst_atomic_check(state)) {
|
||||
vars[next_index].dsc_enabled = false;
|
||||
vars[next_index].bpp_x16 = 0;
|
||||
} else {
|
||||
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
|
||||
if (drm_dp_atomic_find_vcpi_slots(state,
|
||||
params[next_index].port->mgr,
|
||||
params[next_index].port,
|
||||
vars[next_index].pbn,
|
||||
dm_mst_get_pbn_divider(dc_link)) < 0)
|
||||
return;
|
||||
}
|
||||
|
||||
tried[next_index] = true;
|
||||
remaining_to_try--;
|
||||
}
|
||||
}
|
||||
|
||||
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
|
||||
struct dc_state *dc_state,
|
||||
struct dc_link *dc_link)
|
||||
{
|
||||
int i;
|
||||
struct dc_stream_state *stream;
|
||||
struct dsc_mst_fairness_params params[MAX_PIPES];
|
||||
struct dsc_mst_fairness_vars vars[MAX_PIPES];
|
||||
struct amdgpu_dm_connector *aconnector;
|
||||
int count = 0;
|
||||
|
||||
memset(params, 0, sizeof(params));
|
||||
|
||||
/* Set up params */
|
||||
for (i = 0; i < dc_state->stream_count; i++) {
|
||||
struct dc_dsc_policy dsc_policy = {0};
|
||||
|
||||
stream = dc_state->streams[i];
|
||||
|
||||
if (stream->link != dc_link)
|
||||
continue;
|
||||
|
||||
stream->timing.flags.DSC = 0;
|
||||
|
||||
params[count].timing = &stream->timing;
|
||||
params[count].sink = stream->sink;
|
||||
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
|
||||
params[count].port = aconnector->port;
|
||||
params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
|
||||
dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
|
||||
if (!dc_dsc_compute_bandwidth_range(
|
||||
stream->sink->ctx->dc->res_pool->dscs[0],
|
||||
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
|
||||
dsc_policy.min_target_bpp,
|
||||
dsc_policy.max_target_bpp,
|
||||
&stream->sink->sink_dsc_caps.dsc_dec_caps,
|
||||
&stream->timing, ¶ms[count].bw_range))
|
||||
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
|
||||
|
||||
count++;
|
||||
}
|
||||
/* Try no compression */
|
||||
for (i = 0; i < count; i++) {
|
||||
vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
|
||||
vars[i].dsc_enabled = false;
|
||||
vars[i].bpp_x16 = 0;
|
||||
if (drm_dp_atomic_find_vcpi_slots(state,
|
||||
params[i].port->mgr,
|
||||
params[i].port,
|
||||
vars[i].pbn,
|
||||
0) < 0)
|
||||
return false;
|
||||
}
|
||||
if (!drm_dp_mst_atomic_check(state)) {
|
||||
set_dsc_configs_from_fairness_vars(params, vars, count);
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Try max compression */
|
||||
for (i = 0; i < count; i++) {
|
||||
if (params[i].compression_possible) {
|
||||
vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
|
||||
vars[i].dsc_enabled = true;
|
||||
vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
|
||||
if (drm_dp_atomic_find_vcpi_slots(state,
|
||||
params[i].port->mgr,
|
||||
params[i].port,
|
||||
vars[i].pbn,
|
||||
dm_mst_get_pbn_divider(dc_link)) < 0)
|
||||
return false;
|
||||
} else {
|
||||
vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
|
||||
vars[i].dsc_enabled = false;
|
||||
vars[i].bpp_x16 = 0;
|
||||
if (drm_dp_atomic_find_vcpi_slots(state,
|
||||
params[i].port->mgr,
|
||||
params[i].port,
|
||||
vars[i].pbn,
|
||||
0) < 0)
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (drm_dp_mst_atomic_check(state))
|
||||
return false;
|
||||
|
||||
/* Optimize degree of compression */
|
||||
increase_dsc_bpp(state, dc_link, params, vars, count);
|
||||
|
||||
try_disable_dsc(state, dc_link, params, vars, count);
|
||||
|
||||
set_dsc_configs_from_fairness_vars(params, vars, count);
|
||||
|
||||
return true;
|
||||
}
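The flow above is the "fair share algo" mentioned in the commit message: start every DSC-capable stream at its minimum bandwidth, then hand out the remaining 63 time slots in per-stream chunks, smallest headroom first, and finally check whether DSC can be dropped again. The toy program below (plain userspace C, all numbers invented) loosely mirrors the increase_dsc_bpp() step; the real code additionally re-runs drm_dp_mst_atomic_check() after every adjustment and rolls back allocations that do not fit.

/*
 * Toy model (not kernel code) of the fair-share idea: every stream starts
 * at its minimum PBN, then leftover link capacity is handed out in fair
 * per-stream chunks, smallest-headroom stream first.
 */
#include <stdio.h>

#define SLOTS_PER_LINK 63

struct toy_stream {
	int min_pbn;	/* PBN at maximum compression */
	int max_pbn;	/* PBN at minimum compression */
	int pbn;	/* current allocation */
	int done;
};

static int div_round_up(int a, int b) { return (a + b - 1) / b; }

int main(void)
{
	struct toy_stream s[] = {
		{ .min_pbn = 300, .max_pbn = 900 },
		{ .min_pbn = 350, .max_pbn = 700 },
	};
	const int n = sizeof(s) / sizeof(s[0]);
	const int pbn_per_slot = 40;	/* e.g. a 4-lane HBR2 link */
	int i, remaining = n;

	for (i = 0; i < n; i++)
		s[i].pbn = s[i].min_pbn;

	while (remaining) {
		int used_slots = 0, next = -1, slack, fair;

		/* pick the unfinished stream with the least headroom */
		for (i = 0; i < n; i++) {
			if (s[i].done)
				continue;
			if (next < 0 ||
			    s[i].max_pbn - s[i].pbn < s[next].max_pbn - s[next].pbn)
				next = i;
		}

		for (i = 0; i < n; i++)
			used_slots += div_round_up(s[i].pbn, pbn_per_slot);

		/* split leftover time slots evenly among unfinished streams */
		fair = (SLOTS_PER_LINK - used_slots) / remaining * pbn_per_slot;
		slack = s[next].max_pbn - s[next].pbn;
		s[next].pbn += (slack > fair) ? fair : slack;
		s[next].done = 1;
		remaining--;
	}

	for (i = 0; i < n; i++)
		printf("stream %d: %d PBN\n", i, s[i].pbn);
	return 0;
}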
|
||||
|
||||
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
|
||||
struct dc_state *dc_state)
|
||||
{
|
||||
int i, j;
|
||||
struct dc_stream_state *stream;
|
||||
bool computed_streams[MAX_PIPES];
|
||||
struct amdgpu_dm_connector *aconnector;
|
||||
|
||||
for (i = 0; i < dc_state->stream_count; i++)
|
||||
computed_streams[i] = false;
|
||||
|
||||
for (i = 0; i < dc_state->stream_count; i++) {
|
||||
stream = dc_state->streams[i];
|
||||
|
||||
if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
|
||||
continue;
|
||||
|
||||
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
|
||||
|
||||
if (!aconnector || !aconnector->dc_sink)
|
||||
continue;
|
||||
|
||||
if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
|
||||
continue;
|
||||
|
||||
if (computed_streams[i])
|
||||
continue;
|
||||
|
||||
mutex_lock(&aconnector->mst_mgr.lock);
|
||||
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
|
||||
mutex_unlock(&aconnector->mst_mgr.lock);
|
||||
return false;
|
||||
}
|
||||
mutex_unlock(&aconnector->mst_mgr.lock);
|
||||
|
||||
for (j = 0; j < dc_state->stream_count; j++) {
|
||||
if (dc_state->streams[j]->link == stream->link)
|
||||
computed_streams[j] = true;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < dc_state->stream_count; i++) {
|
||||
stream = dc_state->streams[i];
|
||||
|
||||
if (stream->timing.flags.DSC == 1)
|
||||
dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -29,7 +29,14 @@
|
||||
struct amdgpu_display_manager;
|
||||
struct amdgpu_dm_connector;
|
||||
|
||||
int dm_mst_get_pbn_divider(struct dc_link *link);
|
||||
|
||||
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
|
||||
struct amdgpu_dm_connector *aconnector);
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
|
||||
struct dc_state *dc_state);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -525,6 +525,9 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
|
||||
struct dsc_config dsc_cfg;
|
||||
uint8_t dsc_packed_pps[128];
|
||||
|
||||
memset(&dsc_cfg, 0, sizeof(dsc_cfg));
|
||||
memset(dsc_packed_pps, 0, 128);
|
||||
|
||||
/* Enable DSC hw block */
|
||||
dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
|
||||
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
|
||||
|
@ -206,6 +206,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str
|
||||
struct dsc_reg_values dsc_reg_vals;
|
||||
struct dsc_optc_config dsc_optc_cfg;
|
||||
|
||||
memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals));
|
||||
memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg));
|
||||
|
||||
DC_LOG_DSC("Getting packed DSC PPS for DSC Config:");
|
||||
dsc_config_log(dsc, dsc_cfg);
|
||||
DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
|
||||
|
@ -1569,7 +1569,7 @@ static void release_dsc(struct resource_context *res_ctx,
|
||||
|
||||
|
||||
|
||||
static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
|
||||
enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
|
||||
struct dc_state *dc_ctx,
|
||||
struct dc_stream_state *dc_stream)
|
||||
{
|
||||
@ -1584,6 +1584,9 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
|
||||
if (pipe_ctx->stream != dc_stream)
|
||||
continue;
|
||||
|
||||
if (pipe_ctx->stream_res.dsc)
|
||||
continue;
|
||||
|
||||
acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
|
||||
|
||||
/* The number of DSCs can be less than the number of pipes */
|
||||
@ -1632,7 +1635,7 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx,
|
||||
|
||||
/* Get a DSC if required and available */
|
||||
if (result == DC_OK && dc_stream->timing.flags.DSC)
|
||||
result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
|
||||
result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
|
||||
|
||||
if (result == DC_OK)
|
||||
result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
|
||||
|
@ -157,6 +157,7 @@ void dcn20_calculate_dlg_params(
|
||||
|
||||
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
|
||||
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
|
||||
enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
|
||||
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
|
||||
enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
|
||||
|
||||
|
@ -163,11 +163,7 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
||||
break;
|
||||
}
|
||||
|
||||
if (aux_dev->aux->is_remote)
|
||||
res = drm_dp_mst_dpcd_read(aux_dev->aux, pos, buf,
|
||||
todo);
|
||||
else
|
||||
res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
|
||||
res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
|
||||
|
||||
if (res <= 0)
|
||||
break;
|
||||
@ -215,11 +211,7 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
||||
break;
|
||||
}
|
||||
|
||||
if (aux_dev->aux->is_remote)
|
||||
res = drm_dp_mst_dpcd_write(aux_dev->aux, pos, buf,
|
||||
todo);
|
||||
else
|
||||
res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
|
||||
res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
|
||||
|
||||
if (res <= 0)
|
||||
break;
|
||||
|
@ -32,6 +32,7 @@
|
||||
#include <drm/drm_dp_helper.h>
|
||||
#include <drm/drm_print.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
#include <drm/drm_dp_mst_helper.h>
|
||||
|
||||
#include "drm_crtc_helper_internal.h"
|
||||
|
||||
@ -266,7 +267,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
|
||||
|
||||
/**
|
||||
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
|
||||
* @aux: DisplayPort AUX channel
|
||||
* @aux: DisplayPort AUX channel (SST or MST)
|
||||
* @offset: address of the (first) register to read
|
||||
* @buffer: buffer to store the register values
|
||||
* @size: number of bytes in @buffer
|
||||
@ -295,13 +296,18 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
|
||||
* We just have to do it before any DPCD access and hope that the
|
||||
* monitor doesn't power down exactly after the throw away read.
|
||||
*/
|
||||
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
|
||||
1);
|
||||
if (ret != 1)
|
||||
goto out;
|
||||
if (!aux->is_remote) {
|
||||
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV,
|
||||
buffer, 1);
|
||||
if (ret != 1)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
|
||||
size);
|
||||
if (aux->is_remote)
|
||||
ret = drm_dp_mst_dpcd_read(aux, offset, buffer, size);
|
||||
else
|
||||
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset,
|
||||
buffer, size);
|
||||
|
||||
out:
|
||||
drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
|
||||
@ -311,7 +317,7 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
|
||||
|
||||
/**
|
||||
* drm_dp_dpcd_write() - write a series of bytes to the DPCD
|
||||
* @aux: DisplayPort AUX channel
|
||||
* @aux: DisplayPort AUX channel (SST or MST)
|
||||
* @offset: address of the (first) register to write
|
||||
* @buffer: buffer containing the values to write
|
||||
* @size: number of bytes in @buffer
|
||||
@ -328,8 +334,12 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
|
||||
size);
|
||||
if (aux->is_remote)
|
||||
ret = drm_dp_mst_dpcd_write(aux, offset, buffer, size);
|
||||
else
|
||||
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset,
|
||||
buffer, size);
|
||||
|
||||
drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret);
|
||||
return ret;
|
||||
}
|
||||
@ -968,6 +978,19 @@ static void drm_dp_aux_crc_work(struct work_struct *work)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_remote_aux_init() - minimally initialise a remote aux channel
|
||||
* @aux: DisplayPort AUX channel
|
||||
*
|
||||
* Used for remote aux channel in general. Merely initialize the crc work
|
||||
* struct.
|
||||
*/
|
||||
void drm_dp_remote_aux_init(struct drm_dp_aux *aux)
|
||||
{
|
||||
INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_remote_aux_init);
|
||||
|
||||
/**
|
||||
* drm_dp_aux_init() - minimally initialise an aux channel
|
||||
* @aux: DisplayPort AUX channel
|
||||
@ -1155,6 +1178,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
|
||||
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) },
|
||||
/* CH7511 seems to leave SINK_COUNT zeroed */
|
||||
{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
|
||||
/* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
|
||||
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
|
||||
};
|
||||
|
||||
#undef OUI
|
||||
|
@ -853,6 +853,7 @@ static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband
|
||||
{
|
||||
int idx = 1;
|
||||
repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
|
||||
repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
|
||||
idx++;
|
||||
if (idx > raw->curlen)
|
||||
goto fail_len;
|
||||
@ -2174,6 +2175,7 @@ drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_port *port)
|
||||
{
|
||||
mutex_lock(&mgr->lock);
|
||||
port->parent->num_ports--;
|
||||
list_del(&port->next);
|
||||
mutex_unlock(&mgr->lock);
|
||||
drm_dp_mst_topology_put_port(port);
|
||||
@ -2198,6 +2200,9 @@ drm_dp_mst_add_port(struct drm_device *dev,
|
||||
port->aux.dev = dev->dev;
|
||||
port->aux.is_remote = true;
|
||||
|
||||
/* initialize the MST downstream port's AUX crc work queue */
|
||||
drm_dp_remote_aux_init(&port->aux);
|
||||
|
||||
/*
|
||||
* Make sure the memory allocation for our parent branch stays
|
||||
* around until our own memory allocation is released
|
||||
@ -2273,6 +2278,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
|
||||
mutex_lock(&mgr->lock);
|
||||
drm_dp_mst_topology_get_port(port);
|
||||
list_add(&port->next, &mstb->ports);
|
||||
mstb->num_ports++;
|
||||
mutex_unlock(&mgr->lock);
|
||||
}
|
||||
|
||||
@ -2951,6 +2957,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
|
||||
path_res->avail_payload_bw_number);
|
||||
port->available_pbn =
|
||||
path_res->avail_payload_bw_number;
|
||||
port->fec_capable = path_res->fec_capable;
|
||||
}
|
||||
}
|
||||
|
||||
@ -4089,6 +4096,7 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
|
||||
* @mgr: MST topology manager for the port
|
||||
* @port: port to find vcpi slots for
|
||||
* @pbn: bandwidth required for the mode in PBN
|
||||
* @pbn_div: divider for DSC mode that takes FEC into account
|
||||
*
|
||||
* Allocates VCPI slots to @port, replacing any previous VCPI allocations it
|
||||
* may have had. Any atomic drivers which support MST must call this function
|
||||
@ -4115,11 +4123,12 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
|
||||
*/
|
||||
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
|
||||
struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_port *port, int pbn)
|
||||
struct drm_dp_mst_port *port, int pbn,
|
||||
int pbn_div)
|
||||
{
|
||||
struct drm_dp_mst_topology_state *topology_state;
|
||||
struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
|
||||
int prev_slots, req_slots;
|
||||
int prev_slots, prev_bw, req_slots;
|
||||
|
||||
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
|
||||
if (IS_ERR(topology_state))
|
||||
@ -4130,6 +4139,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
|
||||
if (pos->port == port) {
|
||||
vcpi = pos;
|
||||
prev_slots = vcpi->vcpi;
|
||||
prev_bw = vcpi->pbn;
|
||||
|
||||
/*
|
||||
* This should never happen, unless the driver tries
|
||||
@ -4145,14 +4155,22 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!vcpi)
|
||||
if (!vcpi) {
|
||||
prev_slots = 0;
|
||||
prev_bw = 0;
|
||||
}
|
||||
|
||||
req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
|
||||
if (pbn_div <= 0)
|
||||
pbn_div = mgr->pbn_div;
|
||||
|
||||
req_slots = DIV_ROUND_UP(pbn, pbn_div);
|
||||
|
||||
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
|
||||
port->connector->base.id, port->connector->name,
|
||||
port, prev_slots, req_slots);
|
||||
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
|
||||
port->connector->base.id, port->connector->name,
|
||||
port, prev_bw, pbn);
|
||||
|
||||
/* Add the new allocation to the state */
|
||||
if (!vcpi) {
|
||||
@ -4165,6 +4183,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
|
||||
list_add(&vcpi->next, &topology_state->vcpis);
|
||||
}
|
||||
vcpi->vcpi = req_slots;
|
||||
vcpi->pbn = pbn;
|
||||
|
||||
return req_slots;
|
||||
}
|
||||
@@ -4415,10 +4434,11 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp)
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
{
	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -4429,7 +4449,16 @@ int drm_dp_calc_pbn_mode(int clock, int bpp)
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8 convert to bytes
	 *
	 * If the bpp is in units of 1/16, further divide by 16. Put this
	 * factor in the numerator rather than the denominator to avoid
	 * integer overflow
	 */

	if (dsc)
		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
					8 * 54 * 1000 * 1000);

	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
				8 * 54 * 1000 * 1000);
}
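Plugging in the values used by the selftest further down: drm_dp_calc_pbn_mode(154000, 30, false) gives 154,000 × 30 × 64 × 1006 / (8 × 54 × 10^6) ≈ 688.6, which rounds up to 689. In DSC mode the bpp argument is in 1/16-bpp units, so drm_dp_calc_pbn_mode(332880, 24, true) first reduces 24 to 24 / 16 = 1 bit per pixel (integer division) and ends up at 50 PBN.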
|
||||
@ -4731,9 +4760,61 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
|
||||
kfree(mst_state);
|
||||
}
|
||||
|
||||
static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
|
||||
struct drm_dp_mst_branch *branch)
|
||||
{
|
||||
while (port->parent) {
|
||||
if (port->parent == branch)
|
||||
return true;
|
||||
|
||||
if (port->parent->port_parent)
|
||||
port = port->parent->port_parent;
|
||||
else
|
||||
break;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline
|
||||
int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
|
||||
struct drm_dp_mst_topology_state *mst_state)
|
||||
{
|
||||
struct drm_dp_mst_port *port;
|
||||
struct drm_dp_vcpi_allocation *vcpi;
|
||||
int pbn_limit = 0, pbn_used = 0;
|
||||
|
||||
list_for_each_entry(port, &branch->ports, next) {
|
||||
if (port->mstb)
|
||||
if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
|
||||
return -ENOSPC;
|
||||
|
||||
if (port->available_pbn > 0)
|
||||
pbn_limit = port->available_pbn;
|
||||
}
|
||||
DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
|
||||
branch, pbn_limit);
|
||||
|
||||
list_for_each_entry(vcpi, &mst_state->vcpis, next) {
|
||||
if (!vcpi->pbn)
|
||||
continue;
|
||||
|
||||
if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
|
||||
pbn_used += vcpi->pbn;
|
||||
}
|
||||
DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
|
||||
branch, pbn_used);
|
||||
|
||||
if (pbn_used > pbn_limit) {
|
||||
DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
|
||||
branch);
|
||||
return -ENOSPC;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_topology_state *mst_state)
|
||||
drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_topology_state *mst_state)
|
||||
{
|
||||
struct drm_dp_vcpi_allocation *vcpi;
|
||||
int avail_slots = 63, payload_count = 0;
|
||||
@ -4770,6 +4851,128 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_mst_add_affected_dsc_crtcs
|
||||
* @state: Pointer to the new struct drm_dp_mst_topology_state
|
||||
* @mgr: MST topology manager
|
||||
*
|
||||
* Whenever there is a change in mst topology
|
||||
* DSC configuration would have to be recalculated
|
||||
* therefore we need to trigger modeset on all affected
|
||||
* CRTCs in that topology
|
||||
*
|
||||
* See also:
|
||||
* drm_dp_mst_atomic_enable_dsc()
|
||||
*/
|
||||
int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
|
||||
{
|
||||
struct drm_dp_mst_topology_state *mst_state;
|
||||
struct drm_dp_vcpi_allocation *pos;
|
||||
struct drm_connector *connector;
|
||||
struct drm_connector_state *conn_state;
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_crtc_state *crtc_state;
|
||||
|
||||
mst_state = drm_atomic_get_mst_topology_state(state, mgr);
|
||||
|
||||
if (IS_ERR(mst_state))
|
||||
return -EINVAL;
|
||||
|
||||
list_for_each_entry(pos, &mst_state->vcpis, next) {
|
||||
|
||||
connector = pos->port->connector;
|
||||
|
||||
if (!connector)
|
||||
return -EINVAL;
|
||||
|
||||
conn_state = drm_atomic_get_connector_state(state, connector);
|
||||
|
||||
if (IS_ERR(conn_state))
|
||||
return PTR_ERR(conn_state);
|
||||
|
||||
crtc = conn_state->crtc;
|
||||
|
||||
if (WARN_ON(!crtc))
|
||||
return -EINVAL;
|
||||
|
||||
if (!drm_dp_mst_dsc_aux_for_port(pos->port))
|
||||
continue;
|
||||
|
||||
crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
|
||||
|
||||
if (IS_ERR(crtc_state))
|
||||
return PTR_ERR(crtc_state);
|
||||
|
||||
DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
|
||||
mgr, crtc);
|
||||
|
||||
crtc_state->mode_changed = true;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
|
||||
|
||||
/**
|
||||
* drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
|
||||
* @state: Pointer to the new drm_atomic_state
|
||||
* @port: Pointer to the affected MST Port
|
||||
* @pbn: Newly recalculated bw required for link with DSC enabled
|
||||
* @pbn_div: Divider to calculate correct number of pbn per slot
|
||||
* @enable: Boolean flag to enable or disable DSC on the port
|
||||
*
|
||||
* This function enables DSC on the given Port
|
||||
* by recalculating its vcpi from pbn provided
|
||||
* and sets dsc_enable flag to keep track of which
|
||||
* ports have DSC enabled
|
||||
*
|
||||
*/
|
||||
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
|
||||
struct drm_dp_mst_port *port,
|
||||
int pbn, int pbn_div,
|
||||
bool enable)
|
||||
{
|
||||
struct drm_dp_mst_topology_state *mst_state;
|
||||
struct drm_dp_vcpi_allocation *pos;
|
||||
bool found = false;
|
||||
int vcpi = 0;
|
||||
|
||||
mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
|
||||
|
||||
if (IS_ERR(mst_state))
|
||||
return PTR_ERR(mst_state);
|
||||
|
||||
list_for_each_entry(pos, &mst_state->vcpis, next) {
|
||||
if (pos->port == port) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
|
||||
port, mst_state);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (pos->dsc_enabled == enable) {
|
||||
DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
|
||||
port, enable, pos->vcpi);
|
||||
vcpi = pos->vcpi;
|
||||
}
|
||||
|
||||
if (enable) {
|
||||
vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
|
||||
DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
|
||||
port, vcpi);
|
||||
if (vcpi < 0)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pos->dsc_enabled = enable;
|
||||
|
||||
return vcpi;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
|
||||
/**
|
||||
* drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
|
||||
* atomic update is valid
|
||||
@ -4798,7 +5001,10 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
|
||||
int i, ret = 0;
|
||||
|
||||
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
|
||||
ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
|
||||
ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
|
||||
if (ret)
|
||||
break;
|
||||
ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
@ -5062,3 +5268,173 @@ static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
|
||||
{
|
||||
i2c_del_adapter(&aux->ddc);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
|
||||
* @port: The port to check
|
||||
*
|
||||
* A single physical MST hub object can be represented in the topology
|
||||
* by multiple branches, with virtual ports between those branches.
|
||||
*
|
||||
* As of DP1.4, An MST hub with internal (virtual) ports must expose
|
||||
* certain DPCD registers over those ports. See sections 2.6.1.1.1
|
||||
* and 2.6.1.1.2 of Display Port specification v1.4 for details.
|
||||
*
|
||||
* May acquire mgr->lock
|
||||
*
|
||||
* Returns:
|
||||
* true if the port is a virtual DP peer device, false otherwise
|
||||
*/
|
||||
static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
|
||||
{
|
||||
struct drm_dp_mst_port *downstream_port;
|
||||
|
||||
if (!port || port->dpcd_rev < DP_DPCD_REV_14)
|
||||
return false;
|
||||
|
||||
/* Virtual DP Sink (Internal Display Panel) */
|
||||
if (port->port_num >= 8)
|
||||
return true;
|
||||
|
||||
/* DP-to-HDMI Protocol Converter */
|
||||
if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
|
||||
!port->mcs &&
|
||||
port->ldps)
|
||||
return true;
|
||||
|
||||
/* DP-to-DP */
|
||||
mutex_lock(&port->mgr->lock);
|
||||
if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
|
||||
port->mstb &&
|
||||
port->mstb->num_ports == 2) {
|
||||
list_for_each_entry(downstream_port, &port->mstb->ports, next) {
|
||||
if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
|
||||
!downstream_port->input) {
|
||||
mutex_unlock(&port->mgr->lock);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
mutex_unlock(&port->mgr->lock);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
|
||||
* @port: The port to check. A leaf of the MST tree with an attached display.
|
||||
*
|
||||
* Depending on the situation, DSC may be enabled via the endpoint aux,
|
||||
* the immediately upstream aux, or the connector's physical aux.
|
||||
*
|
||||
* This is both the correct aux to read DSC_CAPABILITY and the
|
||||
* correct aux to write DSC_ENABLED.
|
||||
*
|
||||
* This operation can be expensive (up to four aux reads), so
|
||||
* the caller should cache the return.
|
||||
*
|
||||
* Returns:
|
||||
* NULL if DSC cannot be enabled on this port, otherwise the aux device
|
||||
*/
|
||||
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
|
||||
{
|
||||
struct drm_dp_mst_port *immediate_upstream_port;
|
||||
struct drm_dp_mst_port *fec_port;
|
||||
struct drm_dp_desc desc = { 0 };
|
||||
u8 endpoint_fec;
|
||||
u8 endpoint_dsc;
|
||||
|
||||
if (!port)
|
||||
return NULL;
|
||||
|
||||
if (port->parent->port_parent)
|
||||
immediate_upstream_port = port->parent->port_parent;
|
||||
else
|
||||
immediate_upstream_port = NULL;
|
||||
|
||||
fec_port = immediate_upstream_port;
|
||||
while (fec_port) {
|
||||
/*
|
||||
* Each physical link (i.e. not a virtual port) between the
|
||||
* output and the primary device must support FEC
|
||||
*/
|
||||
if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
|
||||
!fec_port->fec_capable)
|
||||
return NULL;
|
||||
|
||||
fec_port = fec_port->parent->port_parent;
|
||||
}
|
||||
|
||||
/* DP-to-DP peer device */
|
||||
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
|
||||
u8 upstream_dsc;
|
||||
|
||||
if (drm_dp_dpcd_read(&port->aux,
|
||||
DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
|
||||
return NULL;
|
||||
if (drm_dp_dpcd_read(&port->aux,
|
||||
DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
|
||||
return NULL;
|
||||
if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
|
||||
DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
|
||||
return NULL;
|
||||
|
||||
/* Enpoint decompression with DP-to-DP peer device */
|
||||
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
|
||||
(endpoint_fec & DP_FEC_CAPABLE) &&
|
||||
(upstream_dsc & 0x2) /* DSC passthrough */)
|
||||
return &port->aux;
|
||||
|
||||
/* Virtual DPCD decompression with DP-to-DP peer device */
|
||||
return &immediate_upstream_port->aux;
|
||||
}
|
||||
|
||||
/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
|
||||
if (drm_dp_mst_is_virtual_dpcd(port))
|
||||
return &port->aux;
|
||||
|
||||
/*
|
||||
* Synaptics quirk
|
||||
* Applies to ports for which:
|
||||
* - Physical aux has Synaptics OUI
|
||||
* - DPv1.4 or higher
|
||||
* - Port is on primary branch device
|
||||
* - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
|
||||
*/
|
||||
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
|
||||
return NULL;
|
||||
|
||||
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
|
||||
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
|
||||
port->parent == port->mgr->mst_primary) {
|
||||
u8 downstreamport;
|
||||
|
||||
if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
|
||||
&downstreamport, 1) < 0)
|
||||
return NULL;
|
||||
|
||||
if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
|
||||
((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
|
||||
!= DP_DWN_STRM_PORT_TYPE_ANALOG))
|
||||
return port->mgr->aux;
|
||||
}
|
||||
|
||||
/*
|
||||
* The check below verifies if the MST sink
|
||||
* connected to the GPU is capable of DSC -
|
||||
* therefore the endpoint needs to be
|
||||
* both DSC and FEC capable.
|
||||
*/
|
||||
if (drm_dp_dpcd_read(&port->aux,
|
||||
DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
|
||||
return NULL;
|
||||
if (drm_dp_dpcd_read(&port->aux,
|
||||
DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
|
||||
return NULL;
|
||||
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
|
||||
(endpoint_fec & DP_FEC_CAPABLE))
|
||||
return &port->aux;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
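A minimal caller sketch (illustrative; the function name is made up) showing the intended pattern, which matches validate_dsc_caps_on_connector() and dm_helpers_dp_write_dsc_enable() earlier in this series: cache the aux returned here, read the DSC capability block through it, and later write DP_DSC_ENABLE to the very same aux.

/* Illustrative caller only; assumes kernel context and DRM DP headers. */
#include <drm/drm_dp_helper.h>
#include <drm/drm_dp_mst_helper.h>

static bool example_enable_dsc(struct drm_dp_mst_port *port)
{
	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
	u8 dsc_caps[16] = { 0 };
	u8 enable = 1;

	if (!dsc_aux)
		return false;	/* DSC cannot be enabled on this port */

	/* Read the DSC capability block through the chosen aux */
	if (drm_dp_dpcd_read(dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	/* Later, enable DSC by writing to the same aux */
	return drm_dp_dpcd_write(dsc_aux, DP_DSC_ENABLE, &enable, 1) >= 0;
}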
|
||||
|
@ -61,10 +61,11 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
|
||||
crtc_state->pipe_bpp = bpp;
|
||||
|
||||
crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
|
||||
crtc_state->pipe_bpp);
|
||||
crtc_state->pipe_bpp,
|
||||
false);
|
||||
|
||||
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
|
||||
port, crtc_state->pbn);
|
||||
port, crtc_state->pbn, 0);
|
||||
if (slots == -EDEADLK)
|
||||
return slots;
|
||||
if (slots >= 0)
|
||||
|
@ -806,11 +806,11 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
|
||||
* topology
|
||||
*/
|
||||
asyh->or.bpc = min(connector->display_info.bpc, 8U);
|
||||
asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3);
|
||||
asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
|
||||
}
|
||||
|
||||
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
|
||||
asyh->dp.pbn);
|
||||
asyh->dp.pbn, 0);
|
||||
if (slots < 0)
|
||||
return slots;
|
||||
|
||||
|
@ -518,7 +518,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
|
||||
|
||||
mst_enc = radeon_encoder->enc_priv;
|
||||
|
||||
mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
|
||||
mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
|
||||
|
||||
mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices;
|
||||
DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
|
||||
|
@@ -18,15 +18,19 @@ int igt_dp_mst_calc_pbn_mode(void *ignored)
		int rate;
		int bpp;
		int expected;
		bool dsc;
	} test_params[] = {
		{ 154000, 30, 689 },
		{ 234000, 30, 1047 },
		{ 297000, 24, 1063 },
		{ 154000, 30, 689, false },
		{ 234000, 30, 1047, false },
		{ 297000, 24, 1063, false },
		{ 332880, 24, 50, true },
		{ 324540, 24, 49, true },
	};

	for (i = 0; i < ARRAY_SIZE(test_params); i++) {
		pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
					   test_params[i].bpp);
					   test_params[i].bpp,
					   test_params[i].dsc);
		FAIL(pbn != test_params[i].expected,
		     "Expected PBN %d for clock %d bpp %d, got %d\n",
		     test_params[i].expected, test_params[i].rate,
|
@ -1465,6 +1465,7 @@ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
|
||||
void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
|
||||
const u8 port_cap[4], struct drm_dp_aux *aux);
|
||||
|
||||
void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
|
||||
void drm_dp_aux_init(struct drm_dp_aux *aux);
|
||||
int drm_dp_aux_register(struct drm_dp_aux *aux);
|
||||
void drm_dp_aux_unregister(struct drm_dp_aux *aux);
|
||||
@ -1522,6 +1523,13 @@ enum drm_dp_quirk {
|
||||
* The driver should ignore SINK_COUNT during detection.
|
||||
*/
|
||||
DP_DPCD_QUIRK_NO_SINK_COUNT,
|
||||
/**
|
||||
* @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
|
||||
*
|
||||
* The device supports MST DSC despite not supporting Virtual DPCD.
|
||||
* The DSC caps can be read from the physical aux instead.
|
||||
*/
|
||||
DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -156,6 +156,8 @@ struct drm_dp_mst_port {
|
||||
* audio-capable.
|
||||
*/
|
||||
bool has_audio;
|
||||
|
||||
bool fec_capable;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -383,6 +385,7 @@ struct drm_dp_port_number_req {
|
||||
|
||||
struct drm_dp_enum_path_resources_ack_reply {
|
||||
u8 port_number;
|
||||
bool fec_capable;
|
||||
u16 full_payload_bw_number;
|
||||
u16 avail_payload_bw_number;
|
||||
};
|
||||
@ -499,6 +502,8 @@ struct drm_dp_payload {
|
||||
struct drm_dp_vcpi_allocation {
|
||||
struct drm_dp_mst_port *port;
|
||||
int vcpi;
|
||||
int pbn;
|
||||
bool dsc_enabled;
|
||||
struct list_head next;
|
||||
};
|
||||
|
||||
@ -727,8 +732,7 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
|
||||
|
||||
|
||||
int drm_dp_calc_pbn_mode(int clock, int bpp);
|
||||
|
||||
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
|
||||
|
||||
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_port *port, int pbn, int slots);
|
||||
@ -777,7 +781,15 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
|
||||
int __must_check
|
||||
drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
|
||||
struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_port *port, int pbn);
|
||||
struct drm_dp_mst_port *port, int pbn,
|
||||
int pbn_div);
|
||||
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
|
||||
struct drm_dp_mst_port *port,
|
||||
int pbn, int pbn_div,
|
||||
bool enable);
|
||||
int __must_check
|
||||
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
|
||||
struct drm_dp_mst_topology_mgr *mgr);
|
||||
int __must_check
|
||||
drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
|
||||
struct drm_dp_mst_topology_mgr *mgr,
|
||||
@ -789,6 +801,8 @@ int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
|
||||
void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
|
||||
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
|
||||
|
||||
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
|
||||
|
||||
extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
|
||||
|
||||
/**
|
||||
|