linux_dsm_epyc7002/drivers/gpu/drm/nouveau/dispnv50/disp.c

/*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "disp.h"
#include "atom.h"
#include "core.h"
#include "head.h"
#include "wndw.h"
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_edid.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl5070.h>
#include <nvif/cl507d.h>
#include <nvif/event.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_fence.h"
#include "nouveau_fbcon.h"
#include <subdev/bios/dp.h>
/******************************************************************************
* Atomic state
*****************************************************************************/
struct nv50_outp_atom {
struct list_head head;
struct drm_encoder *encoder;
bool flush_disable;
union nv50_outp_atom_mask {
struct {
bool ctrl:1;
};
u8 mask;
} set, clr;
};
/******************************************************************************
* EVO channel
*****************************************************************************/
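/* Create an EVO channel object. The supplied oclass list is ordered by
* preference; the first class that the parent display object also exposes
* (per nvif_object_sclass_get()) is initialised and mapped. Returns -ENOSYS
* when none of the requested classes are supported.
*/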
static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size,
struct nv50_chan *chan)
{
struct nvif_sclass *sclass;
int ret, i, n;
chan->device = device;
ret = n = nvif_object_sclass_get(disp, &sclass);
if (ret < 0)
return ret;
while (oclass[0]) {
for (i = 0; i < n; i++) {
if (sclass[i].oclass == oclass[0]) {
ret = nvif_object_init(disp, 0, oclass[0],
data, size, &chan->user);
if (ret == 0)
nvif_object_map(&chan->user, NULL, 0);
nvif_object_sclass_put(&sclass);
return ret;
}
}
oclass++;
}
nvif_object_sclass_put(&sclass);
return -ENOSYS;
}
static void
nv50_chan_destroy(struct nv50_chan *chan)
{
nvif_object_fini(&chan->user);
}
/******************************************************************************
* DMA EVO channel
*****************************************************************************/
void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
nvif_object_fini(&dmac->vram);
nvif_object_fini(&dmac->sync);
nv50_chan_destroy(&dmac->base);
nvif_mem_fini(&dmac->push);
}
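/* Create a DMA EVO channel: allocate a 4KiB coherent push buffer, pass its
* handle in the channel creation arguments, then (unless syncbuf is zero)
* wrap the sync area and all of VRAM in DMA objects at handles 0xf0000000
* (sync) and 0xf0000001 (vram).
*/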
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
struct nv50_dmac *dmac)
{
struct nouveau_cli *cli = (void *)device->object.client;
struct nv50_disp_core_channel_dma_v0 *args = data;
int ret;
mutex_init(&dmac->lock);
ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
&dmac->push);
if (ret)
return ret;
dmac->ptr = dmac->push.object.map.ptr;
args->pushbuf = nvif_handle(&dmac->push.object);
ret = nv50_chan_create(device, disp, oclass, head, data, size,
&dmac->base);
if (ret)
return ret;
if (!syncbuf)
return 0;
ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = syncbuf + 0x0000,
.limit = syncbuf + 0x0fff,
}, sizeof(struct nv_dma_v0),
&dmac->sync);
if (ret)
return ret;
ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = 0,
.limit = device->info.ram_user - 1,
}, sizeof(struct nv_dma_v0),
&dmac->vram);
if (ret)
return ret;
return ret;
}
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
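/* evo_wait()/evo_kick() bracket a burst of methods written into the DMA
* push buffer. evo_wait() takes the channel lock and, if the request would
* run into the final 8 dwords of the page, writes a jump-to-start command
* (0x20000000), resets PUT to zero and waits (up to two seconds) for the
* channel to catch up. evo_kick() advances PUT past the newly written data
* and drops the lock.
*
* A minimal usage sketch (evo_mthd()/evo_data() are helpers assumed to be
* declared in disp.h; method 0x0080 is used purely as an illustration):
*
*	u32 *push = evo_wait(dmac, 2);
*	if (push) {
*		evo_mthd(push, 0x0080, 1);
*		evo_data(push, 0x00000000);
*		evo_kick(push, dmac);
*	}
*/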
u32 *
evo_wait(struct nv50_dmac *evoc, int nr)
{
struct nv50_dmac *dmac = evoc;
struct nvif_device *device = dmac->base.device;
u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
if (nvif_msec(device, 2000,
if (!nvif_rd32(&dmac->base.user, 0x0004))
break;
) < 0) {
mutex_unlock(&dmac->lock);
pr_err("nouveau: evo channel stalled\n");
return NULL;
}
put = 0;
}
return dmac->ptr + put;
}
void
evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
}
/******************************************************************************
* Output path helpers
*****************************************************************************/
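/* nv50_outp_acquire()/nv50_outp_release() claim and return an output
* resource (OR) via the ACQUIRE and RELEASE display methods; on acquire the
* assigned OR index and link are cached in the encoder for later use by the
* ctrl/update paths.
*/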
static void
nv50_outp_release(struct nouveau_encoder *nv_encoder)
{
struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
struct {
struct nv50_disp_mthd_v1 base;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_RELEASE,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
};
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
nv_encoder->or = -1;
nv_encoder->link = 0;
}
static int
nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
{
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_acquire_v0 info;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
};
int ret;
ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
if (ret) {
NV_ERROR(drm, "error acquiring output path: %d\n", ret);
return ret;
}
nv_encoder->or = args.info.or;
nv_encoder->link = args.info.link;
return 0;
}
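/* Decide which mode the hardware should actually be programmed with. For
* panels (LVDS/eDP) with scaling disabled, non-EDID (user-forced) modes are
* still routed through the scaler against the native mode; with any other
* scaling mode the native mode is always used. If the chosen mode differs
* from the current adjusted mode, a full modeset is flagged.
*/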
static int
nv50_outp_atomic_check_view(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct drm_display_mode *native_mode)
{
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct drm_display_mode *mode = &crtc_state->mode;
struct drm_connector *connector = conn_state->connector;
struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
asyc->scaler.full = false;
if (!native_mode)
return 0;
if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
/* Force use of scaler for non-EDID modes. */
if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
break;
mode = native_mode;
asyc->scaler.full = true;
break;
default:
break;
}
} else {
mode = native_mode;
}
if (!drm_mode_equal(adjusted_mode, mode)) {
drm_mode_copy(adjusted_mode, mode);
crtc_state->mode_changed = true;
}
return 0;
}
static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct nouveau_connector *nv_connector =
nouveau_connector(conn_state->connector);
return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
nv_connector->native_mode);
}
/******************************************************************************
* DAC
*****************************************************************************/
static void
nv50_dac_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
if (nv_encoder->crtc)
core->func->dac->ctrl(core, nv_encoder->or, 0x00000000, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
static void
nv50_dac_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
nv50_outp_acquire(nv_encoder);
core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
asyh->or.depth = 0;
nv_encoder->crtc = encoder->crtc;
}
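/* Analogue load detection: ask NVKM to drive the DAC with the VBIOS test
* value (falling back to 340 when the VBIOS provides none) and report
* whether a load was sensed on the line.
*/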
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_dac_load_v0 load;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
};
int ret;
args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
if (args.load.data == 0)
args.load.data = 340;
ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
if (ret || !args.load.load)
return connector_status_disconnected;
return connector_status_connected;
}
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
.atomic_check = nv50_outp_atomic_check,
.enable = nv50_dac_enable,
.disable = nv50_dac_disable,
.detect = nv50_dac_detect
};
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
}
static const struct drm_encoder_funcs
nv50_dac_func = {
.destroy = nv50_dac_destroy,
};
static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type = DRM_MODE_ENCODER_DAC;
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
if (bus)
nv_encoder->i2c = &bus->i2c;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
"dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_dac_help);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
* Audio
*****************************************************************************/
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_hda_eld_v0 eld;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
(0x0100 << nv_crtc->index),
};
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}
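/* Copy the connector's ELD into the SOR_HDA_ELD method payload so audio is
* enabled for this head; skipped when the EDID reports no audio support.
*/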
static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct __packed {
struct {
struct nv50_disp_mthd_v1 mthd;
struct nv50_disp_sor_hda_eld_v0 eld;
} base;
u8 data[sizeof(nv_connector->base.eld)];
} args = {
.base.mthd.version = 1,
.base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
.base.mthd.hasht = nv_encoder->dcb->hasht,
.base.mthd.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
(0x0100 << nv_crtc->index),
};
nv_connector = nouveau_encoder_connector_get(nv_encoder);
if (!drm_detect_monitor_audio(nv_connector->edid))
return;
memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
}
/******************************************************************************
* HDMI
*****************************************************************************/
static void
nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_hdmi_pwr_v0 pwr;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
(0x0100 << nv_crtc->index),
};
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}
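/* Enable HDMI on the SOR: pack an AVI infoframe and, when applicable, an
* HDMI vendor infoframe into the method payload, and derive max_ac_packet
* from the horizontal blanking period minus the rekey value and a constant
* of 18, divided by 32.
*/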
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_hdmi_pwr_v0 pwr;
u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
(0x0100 << nv_crtc->index),
.pwr.state = 1,
.pwr.rekey = 56, /* constant used by both the binary driver and tegra */
};
struct nouveau_connector *nv_connector;
u32 max_ac_packet;
union hdmi_infoframe avi_frame;
union hdmi_infoframe vendor_frame;
int ret;
int size;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
false);
if (!ret) {
/* We have an AVI InfoFrame, populate it to the display */
args.pwr.avi_infoframe_length
= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
}
ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
&nv_connector->base, mode);
if (!ret) {
/* We have a Vendor InfoFrame, populate it to the display */
args.pwr.vendor_infoframe_length
= hdmi_infoframe_pack(&vendor_frame,
args.infoframes
+ args.pwr.avi_infoframe_length,
17);
}
max_ac_packet = mode->htotal - mode->hdisplay;
max_ac_packet -= args.pwr.rekey;
max_ac_packet -= 18; /* constant from tegra */
args.pwr.max_ac_packet = max_ac_packet / 32;
size = sizeof(args.base)
+ sizeof(args.pwr)
+ args.pwr.avi_infoframe_length
+ args.pwr.vendor_infoframe_length;
nvif_mthd(&disp->disp->object, 0, &args, size);
nv50_audio_enable(encoder, mode);
}
/******************************************************************************
* MST
*****************************************************************************/
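/* MST bookkeeping: nv50_mstm wraps the DP MST topology manager for a
* physical SOR, nv50_mstc is the connector created for each MST port, and
* nv50_msto is the per-head DPMST encoder that MST connectors attach to.
*/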
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
struct nv50_mstm {
struct nouveau_encoder *outp;
struct drm_dp_mst_topology_mgr mgr;
struct nv50_msto *msto[4];
bool modified;
bool disabled;
int links;
};
struct nv50_mstc {
struct nv50_mstm *mstm;
struct drm_dp_mst_port *port;
struct drm_connector connector;
struct drm_display_mode *native;
struct edid *edid;
int pbn;
};
struct nv50_msto {
struct drm_encoder encoder;
struct nv50_head *head;
struct nv50_mstc *mstc;
bool disabled;
};
static struct drm_dp_payload *
nv50_msto_payload(struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
int vcpi = mstc->port->vcpi.vcpi, i;
NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
for (i = 0; i < mstm->mgr.max_payloads; i++) {
struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
mstm->outp->base.base.name, i, payload->vcpi,
payload->start_slot, payload->num_slots);
}
for (i = 0; i < mstm->mgr.max_payloads; i++) {
struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
if (payload->vcpi == vcpi)
return payload;
}
return NULL;
}
static void
nv50_msto_cleanup(struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
if (msto->disabled) {
msto->mstc = NULL;
msto->head = NULL;
msto->disabled = false;
}
}
static void
nv50_msto_prepare(struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
.base.hasht = mstm->outp->dcb->hasht,
.base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
(0x0100 << msto->head->base.index),
};
NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
if (mstc->port && mstc->port->vcpi.vcpi > 0) {
struct drm_dp_payload *payload = nv50_msto_payload(msto);
if (payload) {
args.vcpi.start_slot = payload->start_slot;
args.vcpi.num_slots = payload->num_slots;
args.vcpi.pbn = mstc->port->vcpi.pbn;
args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
}
}
NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
msto->encoder.name, msto->head->base.base.name,
args.vcpi.start_slot, args.vcpi.num_slots,
args.vcpi.pbn, args.vcpi.aligned_pbn);
nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
}
static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
struct nv50_mstm *mstm = mstc->mstm;
int bpp = conn_state->connector->display_info.bpc * 3;
int slots;
mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
if (slots < 0)
return slots;
return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
mstc->native);
}
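/* Enabling an MST stream: find the mstc currently routed to this encoder,
* allocate VCPI slots for its payload, acquire the parent SOR on the first
* active link, and program the SOR with the DP protocol and depth that
* match the connector's bpc.
*/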
static void
nv50_msto_enable(struct drm_encoder *encoder)
{
struct nv50_head *head = nv50_head(encoder->crtc);
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = NULL;
struct nv50_mstm *mstm = NULL;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 proto, depth;
int slots;
bool r;
drm_connector_list_iter_begin(encoder->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->best_encoder == &msto->encoder) {
mstc = nv50_mstc(connector);
mstm = mstc->mstm;
break;
}
}
drm_connector_list_iter_end(&conn_iter);
if (WARN_ON(!mstc))
return;
slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
WARN_ON(!r);
if (!mstm->links++)
nv50_outp_acquire(mstm->outp);
if (mstm->outp->link & 1)
proto = 0x8;
else
proto = 0x9;
switch (mstc->connector.display_info.bpc) {
case 6: depth = 0x2; break;
case 8: depth = 0x5; break;
case 10:
default: depth = 0x6; break;
}
mstm->outp->update(mstm->outp, head->base.index,
nv50_head_atom(head->base.base.state), proto, depth);
msto->head = head;
msto->mstc = mstc;
mstm->modified = true;
}
static void
nv50_msto_disable(struct drm_encoder *encoder)
{
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
if (mstc->port)
drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
mstm->modified = true;
if (!--mstm->links)
mstm->disabled = true;
msto->disabled = true;
}
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
.disable = nv50_msto_disable,
.enable = nv50_msto_enable,
.atomic_check = nv50_msto_atomic_check,
};
static void
nv50_msto_destroy(struct drm_encoder *encoder)
{
struct nv50_msto *msto = nv50_msto(encoder);
drm_encoder_cleanup(&msto->encoder);
kfree(msto);
}
static const struct drm_encoder_funcs
nv50_msto = {
.destroy = nv50_msto_destroy,
};
static int
nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
struct nv50_msto **pmsto)
{
struct nv50_msto *msto;
int ret;
if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
return -ENOMEM;
ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
if (ret) {
kfree(*pmsto);
*pmsto = NULL;
return ret;
}
drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
msto->encoder.possible_crtcs = heads;
return 0;
}
static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state)
{
struct nv50_head *head = nv50_head(connector_state->crtc);
struct nv50_mstc *mstc = nv50_mstc(connector);
if (mstc->port) {
struct nv50_mstm *mstm = mstc->mstm;
return &mstm->msto[head->base.index]->encoder;
}
return NULL;
}
static struct drm_encoder *
nv50_mstc_best_encoder(struct drm_connector *connector)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
if (mstc->port) {
struct nv50_mstm *mstm = mstc->mstm;
return &mstm->msto[0]->encoder;
}
return NULL;
}
static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static int
nv50_mstc_get_modes(struct drm_connector *connector)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
int ret = 0;
mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
if (mstc->edid)
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
if (!mstc->connector.display_info.bpc)
mstc->connector.display_info.bpc = 8;
if (mstc->native)
drm_mode_destroy(mstc->connector.dev, mstc->native);
mstc->native = nouveau_conn_native_mode(&mstc->connector);
return ret;
}
static const struct drm_connector_helper_funcs
nv50_mstc_help = {
.get_modes = nv50_mstc_get_modes,
.mode_valid = nv50_mstc_mode_valid,
.best_encoder = nv50_mstc_best_encoder,
.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
};
static enum drm_connector_status
nv50_mstc_detect(struct drm_connector *connector, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
if (!mstc->port)
return connector_status_disconnected;
return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
}
static void
nv50_mstc_destroy(struct drm_connector *connector)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
drm_connector_cleanup(&mstc->connector);
kfree(mstc);
}
static const struct drm_connector_funcs
nv50_mstc = {
.reset = nouveau_conn_reset,
.detect = nv50_mstc_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = nv50_mstc_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
.atomic_set_property = nouveau_conn_atomic_set_property,
.atomic_get_property = nouveau_conn_atomic_get_property,
};
static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
const char *path, struct nv50_mstc **pmstc)
{
struct drm_device *dev = mstm->outp->base.base.dev;
struct nv50_mstc *mstc;
int ret, i;
if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
return -ENOMEM;
mstc->mstm = mstm;
mstc->port = port;
ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret) {
kfree(*pmstc);
*pmstc = NULL;
return ret;
}
drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
mstc->connector.funcs->reset(&mstc->connector);
nouveau_conn_attach_properties(&mstc->connector);
for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
drm_mode_connector_set_path_property(&mstc->connector, path);
return 0;
}
static void
nv50_mstm_cleanup(struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
int ret;
NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
ret = drm_dp_check_act_status(&mstm->mgr);
ret = drm_dp_update_payload_part2(&mstm->mgr);
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
if (mstc && mstc->mstm == mstm)
nv50_msto_cleanup(msto);
}
}
mstm->modified = false;
}
static void
nv50_mstm_prepare(struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
int ret;
NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
ret = drm_dp_update_payload_part1(&mstm->mgr);
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
if (mstc && mstc->mstm == mstm)
nv50_msto_prepare(msto);
}
}
if (mstm->disabled) {
if (!mstm->links)
nv50_outp_release(mstm->outp);
mstm->disabled = false;
}
}
static void
nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
struct nv50_mstm *mstm = nv50_mstm(mgr);
drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
}
static void
nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nv50_mstc *mstc = nv50_mstc(connector);
drm_connector_unregister(&mstc->connector);
drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
mstc->port = NULL;
drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
drm_connector_put(&mstc->connector);
}
static void
nv50_mstm_register_connector(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
drm_connector_register(connector);
}
static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, const char *path)
{
struct nv50_mstm *mstm = nv50_mstm(mgr);
struct nv50_mstc *mstc;
int ret;
ret = nv50_mstc_new(mstm, port, path, &mstc);
if (ret) {
if (mstc)
mstc->connector.funcs->destroy(&mstc->connector);
return NULL;
}
return &mstc->connector;
}
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
.add_connector = nv50_mstm_add_connector,
.register_connector = nv50_mstm_register_connector,
.destroy_connector = nv50_mstm_destroy_connector,
.hotplug = nv50_mstm_hotplug,
};
void
nv50_mstm_service(struct nv50_mstm *mstm)
{
struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
bool handled = true;
int ret;
u8 esi[8] = {};
if (!aux)
return;
while (handled) {
ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
if (ret != 8) {
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
return;
}
drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
if (!handled)
break;
drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
}
}
void
nv50_mstm_remove(struct nv50_mstm *mstm)
{
if (mstm)
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
}
static int
nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
{
struct nouveau_encoder *outp = mstm->outp;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_dp_mst_link_v0 mst;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
.base.hasht = outp->dcb->hasht,
.base.hashm = outp->dcb->hashm,
.mst.state = state,
};
struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
struct nvif_object *disp = &drm->display->disp.object;
int ret;
if (dpcd >= 0x12) {
ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
if (ret < 0)
return ret;
dpcd &= ~DP_MST_EN;
if (state)
dpcd |= DP_MST_EN;
ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
if (ret < 0)
return ret;
}
return nvif_mthd(disp, 0, &args, sizeof(args));
}
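/* Called on (re)plug to choose between MST and SST: a DPCD rev >= 1.2 sink
* with the MST capability bit set may be driven in MST mode (when the
* caller allows it); otherwise the sink is treated as DP 1.1 and MST is
* disabled.
*/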
int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
int ret, state = 0;
if (!mstm)
return 0;
if (dpcd[0] >= 0x12) {
ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
if (ret < 0)
return ret;
if (!(dpcd[1] & DP_MST_CAP))
dpcd[0] = 0x11;
else
state = allow;
}
ret = nv50_mstm_enable(mstm, dpcd[0], state);
if (ret)
return ret;
ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
if (ret)
return nv50_mstm_enable(mstm, dpcd[0], 0);
return mstm->mgr.mst_state;
}
static void
nv50_mstm_fini(struct nv50_mstm *mstm)
{
if (mstm && mstm->mgr.mst_state)
drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
}
static void
nv50_mstm_init(struct nv50_mstm *mstm)
{
if (mstm && mstm->mgr.mst_state)
drm_dp_mst_topology_mgr_resume(&mstm->mgr);
}
static void
nv50_mstm_del(struct nv50_mstm **pmstm)
{
struct nv50_mstm *mstm = *pmstm;
if (mstm) {
kfree(*pmstm);
*pmstm = NULL;
}
}
static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
int conn_base_id, struct nv50_mstm **pmstm)
{
const int max_payloads = hweight8(outp->dcb->heads);
struct drm_device *dev = outp->base.base.dev;
struct nv50_mstm *mstm;
int ret, i;
u8 dpcd;
/* This is a workaround for some monitors not functioning
* correctly in MST mode on initial module load. I think
* some bad interaction with the VBIOS may be responsible.
*
* A good ol' off and on again seems to work here ;)
*/
ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
if (ret >= 0 && dpcd >= 0x12)
drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
return -ENOMEM;
mstm->outp = outp;
mstm->mgr.cbs = &nv50_mstm;
ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
max_payloads, conn_base_id);
if (ret)
return ret;
for (i = 0; i < max_payloads; i++) {
ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
i, &mstm->msto[i]);
if (ret)
return ret;
}
return 0;
}
/******************************************************************************
* SOR
*****************************************************************************/
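/* Update the SOR control word for a head: the low bits track which heads
* own the SOR (one bit per head) and bits 15:8 carry the protocol. With a
* NULL asyh the head is detached and, once no heads remain, the whole
* control word is cleared; otherwise the head bit and protocol are set and
* the OR depth is recorded in the head state.
*/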
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
struct nv50_head_atom *asyh, u8 proto, u8 depth)
{
struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
struct nv50_core *core = disp->core;
if (!asyh) {
nv_encoder->ctrl &= ~BIT(head);
if (!(nv_encoder->ctrl & 0x0000000f))
nv_encoder->ctrl = 0;
} else {
nv_encoder->ctrl |= proto << 8;
nv_encoder->ctrl |= BIT(head);
asyh->or.depth = depth;
}
core->func->sor->ctrl(core, nv_encoder->or, nv_encoder->ctrl, asyh);
}
static void
nv50_sor_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
nv_encoder->crtc = NULL;
if (nv_crtc) {
struct nvkm_i2c_aux *aux = nv_encoder->aux;
u8 pwr;
if (aux) {
int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
if (ret == 0) {
pwr &= ~DP_SET_POWER_MASK;
pwr |= DP_SET_POWER_D3;
nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
}
}
nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
nv50_audio_disable(encoder, nv_crtc);
nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
nv50_outp_release(nv_encoder);
}
}
static void
nv50_sor_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_lvds_script_v0 lvds;
} lvds = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
};
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
u8 proto = 0xf;
u8 depth = 0x0;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
nv_encoder->crtc = encoder->crtc;
nv50_outp_acquire(nv_encoder);
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
if (nv_encoder->link & 1) {
proto = 0x1;
/* Only enable dual-link if:
* - Need to (i.e. rate > 165MHz)
* - DCB says we can
* - Not an HDMI monitor, since there's no dual-link
* on HDMI.
*/
if (mode->clock >= 165000 &&
nv_encoder->dcb->duallink_possible &&
!drm_detect_hdmi_monitor(nv_connector->edid))
proto |= 0x4;
} else {
proto = 0x2;
}
nv50_hdmi_enable(&nv_encoder->base.base, mode);
break;
case DCB_OUTPUT_LVDS:
proto = 0x0;
if (bios->fp_no_ddc) {
if (bios->fp.dual_link)
lvds.lvds.script |= 0x0100;
if (bios->fp.if_is_24bit)
lvds.lvds.script |= 0x0200;
} else {
if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
if (((u8 *)nv_connector->edid)[121] == 2)
lvds.lvds.script |= 0x0100;
} else
if (mode->clock >= bios->fp.duallink_transition_clk) {
lvds.lvds.script |= 0x0100;
}
if (lvds.lvds.script & 0x0100) {
if (bios->fp.strapless_is_24bit & 2)
lvds.lvds.script |= 0x0200;
} else {
if (bios->fp.strapless_is_24bit & 1)
lvds.lvds.script |= 0x0200;
}
if (nv_connector->base.display_info.bpc == 8)
lvds.lvds.script |= 0x0200;
}
nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
break;
case DCB_OUTPUT_DP:
if (nv_connector->base.display_info.bpc == 6)
depth = 0x2;
else
if (nv_connector->base.display_info.bpc == 8)
depth = 0x5;
else
depth = 0x6;
if (nv_encoder->link & 1)
proto = 0x8;
else
proto = 0x9;
nv50_audio_enable(encoder, mode);
break;
default:
BUG();
break;
}
nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
}
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
.atomic_check = nv50_outp_atomic_check,
.enable = nv50_sor_enable,
.disable = nv50_sor_disable,
};
static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
nv50_mstm_del(&nv_encoder->dp.mstm);
drm_encoder_cleanup(encoder);
kfree(encoder);
}
static const struct drm_encoder_funcs
nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
u8 ver, hdr, cnt, len;
u32 data;
int type, ret;
switch (dcbe->type) {
case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_DP:
default:
type = DRM_MODE_ENCODER_TMDS;
break;
}
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->update = nv50_sor_update;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
"sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_sor_help);
drm_mode_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
if (aux) {
if (disp->disp->object.oclass < GF110_DISP) {
/* HW has no support for address-only
* transactions, so we're required to
* use custom I2C-over-AUX code.
*/
nv_encoder->i2c = &aux->i2c;
} else {
nv_encoder->i2c = &nv_connector->aux.ddc;
}
nv_encoder->aux = aux;
}
if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
nv_connector->base.base.id,
&nv_encoder->dp.mstm);
if (ret)
return ret;
}
} else {
struct nvkm_i2c_bus *bus =
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
if (bus)
nv_encoder->i2c = &bus->i2c;
}
return 0;
}
/******************************************************************************
* PIOR
*****************************************************************************/
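/* External (PIOR) encoders: nv50_pior_atomic_check() runs the common output
* check and then doubles the adjusted clock handed to the hardware;
* enable/disable mirror the DAC path but drive the PIOR control method.
*/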
static int
nv50_pior_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
if (ret)
return ret;
crtc_state->adjusted_mode.clock *= 2;
return 0;
}
static void
nv50_pior_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
if (nv_encoder->crtc)
core->func->pior->ctrl(core, nv_encoder->or, 0x00000000, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
static void
nv50_pior_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u8 owner = 1 << nv_crtc->index;
u8 proto;
nv50_outp_acquire(nv_encoder);
nv_connector = nouveau_encoder_connector_get(nv_encoder);
switch (nv_connector->base.display_info.bpc) {
case 10: asyh->or.depth = 0x6; break;
case 8: asyh->or.depth = 0x5; break;
case 6: asyh->or.depth = 0x2; break;
default: asyh->or.depth = 0x0; break;
}
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_DP:
proto = 0x0;
break;
default:
BUG();
break;
}
core->func->pior->ctrl(core, nv_encoder->or, (proto << 8) | owner, asyh);
nv_encoder->crtc = encoder->crtc;
}
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
.atomic_check = nv50_pior_atomic_check,
.enable = nv50_pior_enable,
.disable = nv50_pior_disable,
};
static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
}
static const struct drm_encoder_funcs
nv50_pior_func = {
.destroy = nv50_pior_destroy,
};
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
struct i2c_adapter *ddc;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type;
switch (dcbe->type) {
case DCB_OUTPUT_TMDS:
bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
ddc = bus ? &bus->i2c : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
case DCB_OUTPUT_DP:
aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
ddc = aux ? &aux->i2c : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
default:
return -ENODEV;
}
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->i2c = ddc;
nv_encoder->aux = aux;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
"pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_pior_help);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
* Atomic
*****************************************************************************/
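/* Kick a core channel update: prepare any modified MST payloads first, arm
* the core notifier, submit the update with the supplied interlock mask,
* wait (warning on timeout) for the notifier to signal completion, then let
* the MST code clean up released payloads.
*/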
static void
nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
{
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_core *core = disp->core;
struct nv50_mstm *mstm;
struct drm_encoder *encoder;
NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
drm_for_each_encoder(encoder, drm->dev) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
mstm = nouveau_encoder(encoder)->dp.mstm;
if (mstm && mstm->modified)
nv50_mstm_prepare(mstm);
}
}
core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
core->func->update(core, interlock, true);
if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
disp->core->chan.base.device))
NV_ERROR(drm, "core notifier timeout\n");
drm_for_each_encoder(encoder, drm->dev) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
mstm = nouveau_encoder(encoder)->dp.mstm;
if (mstm && mstm->modified)
nv50_mstm_cleanup(mstm);
}
}
}
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_crtc_state *new_crtc_state, *old_crtc_state;
struct drm_crtc *crtc;
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_atom *atom = nv50_atom(state);
struct nv50_outp_atom *outp, *outt;
u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
int i;
NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
if (atom->lock_core)
mutex_lock(&disp->mutex);
/* Disable head(s). */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
asyh->clr.mask, asyh->set.mask);
if (old_crtc_state->active && !new_crtc_state->active)
drm_crtc_vblank_off(crtc);
if (asyh->clr.mask) {
nv50_head_flush_clr(head, asyh, atom->flush_disable);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
}
}
/* Disable plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
asyw->clr.mask, asyw->set.mask);
if (!asyw->clr.mask)
continue;
nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
}
/* Disable output path(s). */
list_for_each_entry(outp, &atom->outp, head) {
const struct drm_encoder_helper_funcs *help;
struct drm_encoder *encoder;
encoder = outp->encoder;
help = encoder->helper_private;
NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
outp->clr.mask, outp->set.mask);
if (outp->clr.mask) {
help->disable(encoder);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
if (outp->flush_disable) {
nv50_disp_atomic_commit_core(drm, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
}
/* Flush disable. */
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (atom->flush_disable) {
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (interlock[wndw->interlock.type] & wndw->interlock.data) {
if (wndw->func->update)
wndw->func->update(wndw, interlock);
}
}
nv50_disp_atomic_commit_core(drm, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
/* Update output path(s). */
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
const struct drm_encoder_helper_funcs *help;
struct drm_encoder *encoder;
encoder = outp->encoder;
help = encoder->helper_private;
NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
outp->set.mask, outp->clr.mask);
if (outp->set.mask) {
help->enable(encoder);
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
}
list_del(&outp->head);
kfree(outp);
}
/* Update head(s). */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
asyh->set.mask, asyh->clr.mask);
if (asyh->set.mask) {
nv50_head_flush_set(head, asyh);
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
}
if (new_crtc_state->active) {
if (!old_crtc_state->active)
drm_crtc_vblank_on(crtc);
if (new_crtc_state->event)
drm_crtc_vblank_get(crtc);
}
}
/* Update plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
asyw->set.mask, asyw->clr.mask);
if (!asyw->set.mask &&
(!asyw->clr.mask || atom->flush_disable))
continue;
nv50_wndw_flush_set(wndw, interlock, asyw);
}
/* Flush update. */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (interlock[wndw->interlock.type] & wndw->interlock.data) {
if (wndw->func->update)
wndw->func->update(wndw, interlock);
}
}
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (interlock[NV50_DISP_INTERLOCK_BASE] ||
!atom->state.legacy_cursor_update)
nv50_disp_atomic_commit_core(drm, interlock);
else
disp->core->func->update(disp->core, interlock, false);
}
if (atom->lock_core)
mutex_unlock(&disp->mutex);
/* Wait for HW to signal completion. */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
int ret = nv50_wndw_wait_armed(wndw, asyw);
if (ret)
NV_ERROR(drm, "%s: timeout\n", plane->name);
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->event) {
unsigned long flags;
/* Get correct count/ts if racing with vblank irq */
if (new_crtc_state->active)
drm_crtc_accurate_vblank_count(crtc);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
new_crtc_state->event = NULL;
if (new_crtc_state->active)
drm_crtc_vblank_put(crtc);
}
}
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_commit_cleanup_done(state);
drm_atomic_state_put(state);
}
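/* Worker used to run the commit tail for nonblocking commits. */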
static void
nv50_disp_atomic_commit_work(struct work_struct *work)
{
struct drm_atomic_state *state =
container_of(work, typeof(*state), commit_work);
nv50_disp_atomic_commit_tail(state);
}
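/* atomic_commit hook: prepares planes, swaps in the new state, and either
 * runs the commit tail inline or queues it on system_unbound_wq when
 * nonblocking.
 */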
static int
nv50_disp_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
struct drm_crtc *crtc;
bool active = false;
int ret, i;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
return ret;
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
goto done;
INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
goto done;
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
if (ret)
goto err_cleanup;
}
ret = drm_atomic_helper_swap_state(state, true);
if (ret)
goto err_cleanup;
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
if (asyw->set.image)
nv50_wndw_ntfy_enable(wndw, asyw);
}
drm_atomic_state_get(state);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
nv50_disp_atomic_commit_tail(state);
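/* Hold a runtime PM reference for as long as at least one head is
 * enabled (skipping the put below); release it once the last head has
 * been disabled.
 */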
drm_for_each_crtc(crtc, dev) {
if (crtc->state->enable) {
if (!drm->have_disp_power_ref) {
drm->have_disp_power_ref = true;
return 0;
}
active = true;
break;
}
}
if (!active && drm->have_disp_power_ref) {
pm_runtime_put_autosuspend(dev->dev);
drm->have_disp_power_ref = false;
}
err_cleanup:
if (ret)
drm_atomic_helper_cleanup_planes(dev, state);
done:
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
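/* Look up, or allocate and track, the per-encoder state for this commit. */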
static struct nv50_outp_atom *
nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
{
struct nv50_outp_atom *outp;
list_for_each_entry(outp, &atom->outp, head) {
if (outp->encoder == encoder)
return outp;
}
outp = kzalloc(sizeof(*outp), GFP_KERNEL);
if (!outp)
return ERR_PTR(-ENOMEM);
list_add(&outp->head, &atom->outp);
outp->encoder = encoder;
return outp;
}
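/* Flag an output path for disable when its old head was active and the
 * new state requires a full modeset; MST outputs additionally force the
 * disable to be flushed before anything else is programmed.
 */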
static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
struct drm_connector_state *old_connector_state)
{
struct drm_encoder *encoder = old_connector_state->best_encoder;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
struct nv50_outp_atom *outp;
if (!(crtc = old_connector_state->crtc))
return 0;
old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
outp = nv50_disp_outp_atomic_add(atom, encoder);
if (IS_ERR(outp))
return PTR_ERR(outp);
if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
outp->flush_disable = true;
atom->flush_disable = true;
}
outp->clr.ctrl = true;
atom->lock_core = true;
}
return 0;
}
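/* Flag an output path for enable when its new head state is active and
 * requires a full modeset.
 */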
static int
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
struct drm_connector_state *connector_state)
{
struct drm_encoder *encoder = connector_state->best_encoder;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
struct nv50_outp_atom *outp;
if (!(crtc = connector_state->crtc))
return 0;
new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
outp = nv50_disp_outp_atomic_add(atom, encoder);
if (IS_ERR(outp))
return PTR_ERR(outp);
outp->set.ctrl = true;
atom->lock_core = true;
}
return 0;
}
static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
struct nv50_atom *atom = nv50_atom(state);
struct drm_connector_state *old_connector_state, *new_connector_state;
struct drm_connector *connector;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int ret, i;
/* We need to handle colour management on a per-plane basis. */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->color_mgmt_changed) {
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
}
}
ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
if (ret)
return ret;
ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
if (ret)
return ret;
}
return 0;
}
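/* Drop any per-encoder state accumulated on a state that is being
 * cleared (e.g. after an -EDEADLK back-off).
 */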
static void
nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
{
struct nv50_atom *atom = nv50_atom(state);
struct nv50_outp_atom *outp, *outt;
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
list_del(&outp->head);
kfree(outp);
}
drm_atomic_state_default_clear(state);
}
static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
struct nv50_atom *atom = nv50_atom(state);
drm_atomic_state_default_release(&atom->state);
kfree(atom);
}
static struct drm_atomic_state *
nv50_disp_atomic_state_alloc(struct drm_device *dev)
{
struct nv50_atom *atom;
if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
drm_atomic_state_init(dev, &atom->state) < 0) {
kfree(atom);
return NULL;
}
INIT_LIST_HEAD(&atom->outp);
return &atom->state;
}
static const struct drm_mode_config_funcs
nv50_disp_func = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = nv50_disp_atomic_check,
.atomic_commit = nv50_disp_atomic_commit,
.atomic_state_alloc = nv50_disp_atomic_state_alloc,
.atomic_state_clear = nv50_disp_atomic_state_clear,
.atomic_state_free = nv50_disp_atomic_state_free,
};
/******************************************************************************
* Init
*****************************************************************************/
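/* Runtime teardown (suspend/unload): fini each nv50 window plane and the
 * MST state hanging off every non-MST encoder.
 */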
void
nv50_display_fini(struct drm_device *dev)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
struct drm_plane *plane;
drm_for_each_plane(plane, dev) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (plane->funcs != &nv50_wndw)
continue;
nv50_wndw_fini(wndw);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
nv_encoder = nouveau_encoder(encoder);
nv50_mstm_fini(nv_encoder->dp.mstm);
}
}
}
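/* Runtime init (resume/load): bring the core channel up, then restore
 * MST and window plane state.
 */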
int
nv50_display_init(struct drm_device *dev)
{
struct nv50_core *core = nv50_disp(dev)->core;
struct drm_encoder *encoder;
struct drm_plane *plane;
core->func->init(core);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
struct nouveau_encoder *nv_encoder =
nouveau_encoder(encoder);
nv50_mstm_init(nv_encoder->dp.mstm);
}
}
drm_for_each_plane(plane, dev) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (plane->funcs != &nv50_wndw)
continue;
nv50_wndw_init(wndw);
}
return 0;
}
void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
nv50_core_del(&disp->core);
nouveau_bo_unmap(disp->sync);
if (disp->sync)
nouveau_bo_unpin(disp->sync);
nouveau_bo_ref(NULL, &disp->sync);
nouveau_display(dev)->priv = NULL;
kfree(disp);
}
MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
static int nouveau_atomic = 0;
module_param_named(atomic, nouveau_atomic, int, 0400);
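/* One-time constructor: allocates the nv50_disp, sync buffer and core
 * channel, then creates heads from the detected head mask and
 * encoder/connector objects from the VBIOS DCB table.
 */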
int
nv50_display_create(struct drm_device *dev)
{
struct nvif_device *device = &nouveau_drm(dev)->client.device;
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *tmp;
struct nv50_disp *disp;
struct dcb_output *dcbe;
int crtcs, ret, i;
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
mutex_init(&disp->mutex);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
if (nouveau_atomic)
dev->driver->driver_features |= DRIVER_ATOMIC;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &disp->sync);
if (!ret) {
ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
if (!ret) {
ret = nouveau_bo_map(disp->sync);
if (ret)
nouveau_bo_unpin(disp->sync);
}
if (ret)
nouveau_bo_ref(NULL, &disp->sync);
}
if (ret)
goto out;
/* allocate master evo channel */
ret = nv50_core_new(drm, &disp->core);
if (ret)
goto out;
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
else
if (disp->disp->object.oclass >= GF110_DISP)
crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
else
crtcs = 0x3;
for (i = 0; i < fls(crtcs); i++) {
if (!(crtcs & (1 << i)))
continue;
ret = nv50_head_create(dev, i);
if (ret)
goto out;
}
/* create encoder/connector objects based on VBIOS DCB table */
for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
connector = nouveau_connector_create(dev, dcbe->connector);
if (IS_ERR(connector))
continue;
if (dcbe->location == DCB_LOC_ON_CHIP) {
switch (dcbe->type) {
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_LVDS:
case DCB_OUTPUT_DP:
ret = nv50_sor_create(connector, dcbe);
break;
case DCB_OUTPUT_ANALOG:
ret = nv50_dac_create(connector, dcbe);
break;
default:
ret = -ENODEV;
break;
}
} else {
ret = nv50_pior_create(connector, dcbe);
}
if (ret) {
NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
dcbe->location, dcbe->type,
ffs(dcbe->or) - 1, ret);
ret = 0;
}
}
/* cull any connectors we created that don't have an encoder */
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
if (connector->encoder_ids[0])
continue;
NV_WARN(drm, "%s has no encoders, removing\n",
connector->name);
connector->funcs->destroy(connector);
}
out:
if (ret)
nv50_display_destroy(dev);
return ret;
}