2014-05-19 11:54:33 +07:00
|
|
|
/*
|
2011-07-04 13:25:18 +07:00
|
|
|
* Copyright 2011 Red Hat Inc.
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
|
|
|
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
|
|
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
|
|
* OTHER DEALINGS IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
* Authors: Ben Skeggs
|
|
|
|
*/
|
2018-05-08 17:39:47 +07:00
|
|
|
#include "disp.h"
|
|
|
|
#include "atom.h"
|
|
|
|
#include "core.h"
|
|
|
|
#include "head.h"
|
|
|
|
#include "wndw.h"
|
2011-07-04 13:25:18 +07:00
|
|
|
|
2011-07-05 07:33:08 +07:00
|
|
|
#include <linux/dma-mapping.h>
|
2017-04-12 00:11:18 +07:00
|
|
|
#include <linux/hdmi.h>
|
2011-07-05 10:08:40 +07:00
|
|
|
|
2012-10-03 00:01:07 +07:00
|
|
|
#include <drm/drmP.h>
|
2016-11-04 14:20:36 +07:00
|
|
|
#include <drm/drm_atomic_helper.h>
|
2014-05-30 22:48:06 +07:00
|
|
|
#include <drm/drm_dp_helper.h>
|
2015-12-04 15:45:43 +07:00
|
|
|
#include <drm/drm_fb_helper.h>
|
2016-11-04 14:20:36 +07:00
|
|
|
#include <drm/drm_plane_helper.h>
|
2019-01-18 04:03:34 +07:00
|
|
|
#include <drm/drm_probe_helper.h>
|
2018-09-04 07:57:36 +07:00
|
|
|
#include <drm/drm_scdc_helper.h>
|
2017-04-12 00:11:18 +07:00
|
|
|
#include <drm/drm_edid.h>
|
2011-07-04 13:25:18 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
#include <nvif/class.h>
|
|
|
|
#include <nvif/cl0002.h>
|
|
|
|
#include <nvif/cl5070.h>
|
|
|
|
#include <nvif/cl507d.h>
|
|
|
|
#include <nvif/event.h>
|
2018-01-12 14:05:33 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
#include "nouveau_drv.h"
|
|
|
|
#include "nouveau_dma.h"
|
|
|
|
#include "nouveau_gem.h"
|
|
|
|
#include "nouveau_connector.h"
|
|
|
|
#include "nouveau_encoder.h"
|
|
|
|
#include "nouveau_fence.h"
|
|
|
|
#include "nouveau_fbcon.h"
|
2018-01-12 14:05:33 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
#include <subdev/bios/dp.h>
|
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* Atomic state
|
|
|
|
*****************************************************************************/
|
2018-01-12 14:05:33 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
/* Per-encoder atomic state tracked across a commit: which output paths
 * need their control methods set or cleared.
 */
struct nv50_outp_atom {
	struct list_head head;	/* entry on the atomic state's outp list */

	struct drm_encoder *encoder;
	bool flush_disable;	/* disable must be flushed before core update */

	/* Bitmask of pending operations; 'set' = to enable, 'clr' = to
	 * disable.  The anonymous struct gives named access to the bits
	 * aliased by 'mask'.
	 */
	union nv50_outp_atom_mask {
		struct {
			bool ctrl:1;
		};
		u8 mask;
	} set, clr;
};
|
2016-11-04 14:20:36 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* EVO channel
|
|
|
|
*****************************************************************************/
|
2016-11-04 14:20:36 +07:00
|
|
|
|
|
|
|
/* Create an EVO channel object.
 *
 * Walks the caller-supplied 'oclass' list (terminated by a zero entry)
 * and instantiates the first class that the display object reports as
 * supported.  On success the channel's user object is also mapped.
 *
 * Returns 0 on success, negative errno on failure; -ENOSYS when none
 * of the requested classes is supported.
 */
static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_chan *chan)
{
	struct nvif_sclass *sclass;
	int ret, i, n;

	chan->device = device;

	/* Query the list of classes supported by the display object. */
	ret = n = nvif_object_sclass_get(disp, &sclass);
	if (ret < 0)
		return ret;

	while (oclass[0]) {
		for (i = 0; i < n; i++) {
			if (sclass[i].oclass == oclass[0]) {
				ret = nvif_object_init(disp, 0, oclass[0],
						       data, size, &chan->user);
				if (ret == 0)
					nvif_object_map(&chan->user, NULL, 0);
				nvif_object_sclass_put(&sclass);
				return ret;
			}
		}
		oclass++;
	}

	/* Nothing in 'oclass' matched what the hardware supports. */
	nvif_object_sclass_put(&sclass);
	return -ENOSYS;
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/* Destroy a channel created by nv50_chan_create(); nvif_object_fini()
 * also tears down the mapping made there.
 */
static void
nv50_chan_destroy(struct nv50_chan *chan)
{
	nvif_object_fini(&chan->user);
}
|
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* DMA EVO channel
|
|
|
|
*****************************************************************************/
|
2016-11-04 14:20:36 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
/* Tear down a DMA EVO channel in reverse order of creation: the
 * vram/sync DMA objects first, then the channel, and finally the
 * push-buffer memory backing it.
 */
void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
	nvif_object_fini(&dmac->vram);
	nvif_object_fini(&dmac->sync);

	nv50_chan_destroy(&dmac->base);

	nvif_mem_fini(&dmac->push);
}
|
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
int
|
|
|
|
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
|
|
|
|
const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
|
|
|
|
struct nv50_dmac *dmac)
|
2016-11-04 14:20:36 +07:00
|
|
|
{
|
2018-05-08 17:39:47 +07:00
|
|
|
struct nouveau_cli *cli = (void *)device->object.client;
|
|
|
|
struct nv50_disp_core_channel_dma_v0 *args = data;
|
2018-07-18 06:33:39 +07:00
|
|
|
u8 type = NVIF_MEM_COHERENT;
|
2018-05-08 17:39:47 +07:00
|
|
|
int ret;
|
2016-11-04 14:20:36 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
mutex_init(&dmac->lock);
|
2016-11-04 14:20:36 +07:00
|
|
|
|
2018-07-18 06:33:39 +07:00
|
|
|
/* Pascal added support for 47-bit physical addresses, but some
|
|
|
|
* parts of EVO still only accept 40-bit PAs.
|
|
|
|
*
|
|
|
|
* To avoid issues on systems with large amounts of RAM, and on
|
|
|
|
* systems where an IOMMU maps pages at a high address, we need
|
|
|
|
* to allocate push buffers in VRAM instead.
|
|
|
|
*
|
|
|
|
* This appears to match NVIDIA's behaviour on Pascal.
|
|
|
|
*/
|
|
|
|
if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
|
|
|
|
type |= NVIF_MEM_VRAM;
|
|
|
|
|
|
|
|
ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push);
|
2018-05-08 17:39:47 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2011-07-05 13:48:06 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
dmac->ptr = dmac->push.object.map.ptr;
|
2011-07-05 13:48:06 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
args->pushbuf = nvif_handle(&dmac->push.object);
|
2011-07-05 13:48:06 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
ret = nv50_chan_create(device, disp, oclass, head, data, size,
|
|
|
|
&dmac->base);
|
|
|
|
if (ret)
|
2016-11-04 14:20:36 +07:00
|
|
|
return ret;
|
|
|
|
|
2018-05-08 17:39:48 +07:00
|
|
|
if (!syncbuf)
|
|
|
|
return 0;
|
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
|
|
|
|
&(struct nv_dma_v0) {
|
|
|
|
.target = NV_DMA_V0_TARGET_VRAM,
|
|
|
|
.access = NV_DMA_V0_ACCESS_RDWR,
|
|
|
|
.start = syncbuf + 0x0000,
|
|
|
|
.limit = syncbuf + 0x0fff,
|
|
|
|
}, sizeof(struct nv_dma_v0),
|
|
|
|
&dmac->sync);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2012-10-16 11:18:32 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
|
|
|
|
&(struct nv_dma_v0) {
|
|
|
|
.target = NV_DMA_V0_TARGET_VRAM,
|
|
|
|
.access = NV_DMA_V0_ACCESS_RDWR,
|
|
|
|
.start = 0,
|
|
|
|
.limit = device->info.ram_user - 1,
|
|
|
|
}, sizeof(struct nv_dma_v0),
|
|
|
|
&dmac->vram);
|
2011-07-05 13:48:06 +07:00
|
|
|
if (ret)
|
2018-05-08 17:39:47 +07:00
|
|
|
return ret;
|
|
|
|
|
2011-07-05 13:48:06 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* EVO channel helpers
|
|
|
|
*****************************************************************************/
|
2018-12-12 13:51:17 +07:00
|
|
|
/* Flush pending push-buffer writes to VRAM-backed push buffers before
 * the channel's PUT pointer is advanced.  No-op for coherent system
 * memory push buffers.
 */
static void
evo_flush(struct nv50_dmac *dmac)
{
	/* Push buffer fetches are not coherent with BAR1, we need to ensure
	 * writes have been flushed right through to VRAM before writing PUT.
	 */
	if (dmac->push.type & NVIF_MEM_VRAM) {
		struct nvif_device *device = dmac->base.device;
		/* NOTE(review): 0x070000 appears to trigger/poll a flush;
		 * bit 1 clearing signals completion — register semantics
		 * not documented here.
		 */
		nvif_wr32(&device->object, 0x070000, 0x00000001);
		nvif_msec(device, 2000,
			if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
				break;
		);
	}
}
|
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
/* Reserve space for up to 'nr' words in the EVO push buffer.
 *
 * On success, returns a pointer at which the caller writes commands
 * and leaves dmac->lock HELD — the matching evo_kick() submits the
 * commands and releases the lock.  Returns NULL (lock released) if
 * the channel fails to drain within the timeout.
 */
u32 *
evo_wait(struct nv50_dmac *evoc, int nr)
{
	struct nv50_dmac *dmac = evoc;
	struct nvif_device *device = dmac->base.device;
	u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;

	mutex_lock(&dmac->lock);
	/* Not enough room before the end of the one-page buffer (keep 8
	 * words of headroom): wrap back to the start.
	 */
	if (put + nr >= (PAGE_SIZE / 4) - 8) {
		/* NOTE(review): 0x20000000 is presumably a jump-to-offset-0
		 * method — confirm against EVO method documentation.
		 */
		dmac->ptr[put] = 0x20000000;
		evo_flush(dmac);

		/* Reset PUT and wait for GET (user 0x0004) to follow. */
		nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
		if (nvif_msec(device, 2000,
			if (!nvif_rd32(&dmac->base.user, 0x0004))
				break;
		) < 0) {
			mutex_unlock(&dmac->lock);
			pr_err("nouveau: evo channel stalled\n");
			return NULL;
		}

		put = 0;
	}

	return dmac->ptr + put;
}
|
|
|
|
|
|
|
|
/* Submit commands written after a successful evo_wait(): flush the
 * push buffer (needed for VRAM-backed buffers), advance PUT to the
 * caller's current write position, and release the lock taken by
 * evo_wait().
 */
void
evo_kick(u32 *push, struct nv50_dmac *evoc)
{
	struct nv50_dmac *dmac = evoc;

	evo_flush(dmac);

	/* PUT is a byte offset; 'push - dmac->ptr' is in 32-bit words. */
	nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
	mutex_unlock(&dmac->lock);
}
|
|
|
|
|
2014-12-22 13:30:13 +07:00
|
|
|
/******************************************************************************
|
2016-11-04 14:20:36 +07:00
|
|
|
* Output path helpers
|
2014-12-22 13:30:13 +07:00
|
|
|
*****************************************************************************/
|
2017-05-19 20:59:35 +07:00
|
|
|
/* Release the output resource (OR) previously acquired for this
 * encoder via nv50_outp_acquire(), and invalidate the cached OR and
 * link values.
 */
static void
nv50_outp_release(struct nouveau_encoder *nv_encoder)
{
	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
	struct {
		struct nv50_disp_mthd_v1 base;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_RELEASE,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = nv_encoder->dcb->hashm,
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	nv_encoder->or = -1;	/* -1 = no OR assigned */
	nv_encoder->link = 0;
}
|
|
|
|
|
|
|
|
/* Acquire an output resource (OR) for this encoder from NVKM and cache
 * the assigned OR index and link in the encoder.
 *
 * Returns 0 on success, negative errno on failure (logged).
 */
static int
nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
{
	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_acquire_v0 info;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = nv_encoder->dcb->hashm,
	};
	int ret;

	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	if (ret) {
		NV_ERROR(drm, "error acquiring output path: %d\n", ret);
		return ret;
	}

	/* Remember which OR/link NVKM chose for later method calls. */
	nv_encoder->or = args.info.or;
	nv_encoder->link = args.info.link;
	return 0;
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
static int
|
|
|
|
nv50_outp_atomic_check_view(struct drm_encoder *encoder,
|
|
|
|
struct drm_crtc_state *crtc_state,
|
|
|
|
struct drm_connector_state *conn_state,
|
|
|
|
struct drm_display_mode *native_mode)
|
|
|
|
{
|
|
|
|
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
|
|
|
|
struct drm_display_mode *mode = &crtc_state->mode;
|
|
|
|
struct drm_connector *connector = conn_state->connector;
|
|
|
|
struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
|
|
|
|
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
|
|
|
|
|
|
|
|
NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
|
|
|
|
asyc->scaler.full = false;
|
|
|
|
if (!native_mode)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
|
|
|
|
switch (connector->connector_type) {
|
|
|
|
case DRM_MODE_CONNECTOR_LVDS:
|
|
|
|
case DRM_MODE_CONNECTOR_eDP:
|
|
|
|
/* Force use of scaler for non-EDID modes. */
|
|
|
|
if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
|
|
|
|
break;
|
|
|
|
mode = native_mode;
|
|
|
|
asyc->scaler.full = true;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
mode = native_mode;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!drm_mode_equal(adjusted_mode, mode)) {
|
|
|
|
drm_mode_copy(adjusted_mode, mode);
|
|
|
|
crtc_state->mode_changed = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
static int
|
|
|
|
nv50_outp_atomic_check(struct drm_encoder *encoder,
|
|
|
|
struct drm_crtc_state *crtc_state,
|
|
|
|
struct drm_connector_state *conn_state)
|
2014-12-22 13:30:13 +07:00
|
|
|
{
|
2016-11-04 14:20:36 +07:00
|
|
|
struct nouveau_connector *nv_connector =
|
|
|
|
nouveau_connector(conn_state->connector);
|
|
|
|
return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
|
|
|
|
nv_connector->native_mode);
|
2014-12-22 13:30:13 +07:00
|
|
|
}
|
|
|
|
|
2011-07-04 13:25:18 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* DAC
|
|
|
|
*****************************************************************************/
|
2018-05-08 17:39:47 +07:00
|
|
|
/* Disable a DAC output: detach it from its head (if currently bound)
 * by programming a zero ctrl value, then release the OR.
 */
static void
nv50_dac_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	if (nv_encoder->crtc)
		core->func->dac->ctrl(core, nv_encoder->or, 0x00000000, NULL);
	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}
|
|
|
|
|
|
|
|
/* Enable a DAC output: acquire an OR and attach it to the encoder's
 * current head via the core channel.
 */
static void
nv50_dac_enable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
	struct nv50_core *core = nv50_disp(encoder->dev)->core;

	nv50_outp_acquire(nv_encoder);

	/* ctrl takes a head bitmask; bind to this encoder's head only. */
	core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
	/* Analog output: no OR depth setting applies. */
	asyh->or.depth = 0;

	nv_encoder->crtc = encoder->crtc;
}
|
|
|
|
|
2011-07-07 06:51:29 +07:00
|
|
|
/* Load-detect a DAC connection: ask NVKM to perform analog load
 * sensing and report connected/disconnected accordingly.
 */
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_dac_load_v0 load;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = nv_encoder->dcb->hashm,
	};
	int ret;

	/* Use the VBIOS-provided test value; fall back to 340 when the
	 * VBIOS supplies none.  NOTE(review): 340 is presumably the
	 * default load-detect voltage level — confirm against VBIOS docs.
	 */
	args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
	if (args.load.data == 0)
		args.load.data = 340;

	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	if (ret || !args.load.load)
		return connector_status_disconnected;

	return connector_status_connected;
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/* Encoder helper vtable for DAC outputs. */
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_dac_enable,
	.disable = nv50_dac_disable,
	.detect = nv50_dac_detect
};
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/* Destroy a DAC encoder allocated by nv50_dac_create(). */
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
|
|
|
|
|
|
|
|
/* Encoder function vtable for DAC outputs. */
static const struct drm_encoder_funcs
nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};
|
|
|
|
|
|
|
|
/* Create and register a DAC encoder for the given DCB entry, looking
 * up its DDC bus and attaching it to 'connector'.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type = DRM_MODE_ENCODER_DAC;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;

	/* The bus may legitimately be absent; i2c then stays NULL. */
	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
	if (bus)
		nv_encoder->i2c = &bus->i2c;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
			 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_dac_help);

	drm_connector_attach_encoder(connector, encoder);
	return 0;
}
|
2011-07-04 13:25:18 +07:00
|
|
|
|
2011-11-11 15:13:13 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* Audio
|
|
|
|
*****************************************************************************/
|
|
|
|
/* Disable HDA audio on the SOR driving 'nv_crtc' by sending an ELD
 * method with no payload.
 */
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hda_eld_v0 eld;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.hasht = nv_encoder->dcb->hasht,
		/* Encode the head index into the hashm per-head field. */
		.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
			      (0x0100 << nv_crtc->index),
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}
|
|
|
|
|
|
|
|
/* Enable HDA audio for an audio-capable monitor by passing the
 * connector's ELD to NVKM.  No-op when the monitor reports no audio
 * support.
 */
static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	/* Method header followed directly by the ELD bytes; __packed so
	 * the data immediately follows the header with no padding.
	 */
	struct __packed {
		struct {
			struct nv50_disp_mthd_v1 mthd;
			struct nv50_disp_sor_hda_eld_v0 eld;
		} base;
		u8 data[sizeof(nv_connector->base.eld)];
	} args = {
		.base.mthd.version = 1,
		.base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.mthd.hasht = nv_encoder->dcb->hasht,
		.base.mthd.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
				   (0x0100 << nv_crtc->index),
	};

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_monitor_audio(nv_connector->edid))
		return;

	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));

	/* Only send as many ELD bytes as are actually valid. */
	nvif_mthd(&disp->disp->object, 0, &args,
		  sizeof(args.base) + drm_eld_size(args.data));
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* HDMI
|
|
|
|
*****************************************************************************/
|
2011-11-11 15:13:13 +07:00
|
|
|
/* Power down HDMI infoframe/audio-packet transmission on the SOR
 * driving 'nv_crtc' (pwr.state left at its zero default = off).
 */
static void
nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
			      (0x0100 << nv_crtc->index),
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}
|
|
|
|
|
|
|
|
/* Enable HDMI output on an encoder: build AVI and HDMI-vendor
 * infoframes for 'mode', program HDMI power/packet settings (including
 * SCDC scrambling/clock-divider flags for HDMI 2.0 rates), enable
 * audio, and finally mirror the SCDC settings to the sink over DDC.
 *
 * No-op when the attached monitor is not HDMI.
 */
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
		u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
			      (0x0100 << nv_crtc->index),
		.pwr.state = 1,
		.pwr.rekey = 56, /* binary driver, and tegra, constant */
	};
	struct nouveau_connector *nv_connector;
	struct drm_hdmi_info *hdmi;
	u32 max_ac_packet;
	union hdmi_infoframe avi_frame;
	union hdmi_infoframe vendor_frame;
	bool high_tmds_clock_ratio = false, scrambling = false;
	u8 config;
	int ret;
	int size;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_hdmi_monitor(nv_connector->edid))
		return;

	hdmi = &nv_connector->base.display_info.hdmi;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi,
						       &nv_connector->base, mode);
	if (!ret) {
		/* We have an AVI InfoFrame, populate it to the display */
		args.pwr.avi_infoframe_length
			= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
	}

	ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
							  &nv_connector->base, mode);
	if (!ret) {
		/* We have a Vendor InfoFrame, populate it to the display */
		args.pwr.vendor_infoframe_length
			= hdmi_infoframe_pack(&vendor_frame,
					      args.infoframes
					      + args.pwr.avi_infoframe_length,
					      17);
	}

	/* Audio packet budget derived from horizontal blanking, minus
	 * the rekey window and a fixed allowance (18, from tegra).
	 */
	max_ac_packet = mode->htotal - mode->hdisplay;
	max_ac_packet -= args.pwr.rekey;
	max_ac_packet -= 18; /* constant from tegra */
	args.pwr.max_ac_packet = max_ac_packet / 32;

	/* HDMI 2.0: pixel clocks above 340MHz need the 1/40 TMDS clock
	 * divider and scrambling; scrambling may also be requested for
	 * lower rates by the sink.
	 */
	if (hdmi->scdc.scrambling.supported) {
		high_tmds_clock_ratio = mode->clock > 340000;
		scrambling = high_tmds_clock_ratio ||
			hdmi->scdc.scrambling.low_rates;
	}

	args.pwr.scdc =
		NV50_DISP_SOR_HDMI_PWR_V0_SCDC_SCRAMBLE * scrambling |
		NV50_DISP_SOR_HDMI_PWR_V0_SCDC_DIV_BY_4 * high_tmds_clock_ratio;

	/* Only transmit the infoframe bytes actually packed. */
	size = sizeof(args.base)
		+ sizeof(args.pwr)
		+ args.pwr.avi_infoframe_length
		+ args.pwr.vendor_infoframe_length;
	nvif_mthd(&disp->disp->object, 0, &args, size);

	nv50_audio_enable(encoder, mode);

	/* If SCDC is supported by the downstream monitor, update
	 * divider / scrambling settings to what we programmed above.
	 */
	if (!hdmi->scdc.scrambling.supported)
		return;

	ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &config);
	if (ret < 0) {
		NV_ERROR(drm, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret);
		return;
	}
	config &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE);
	config |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 * high_tmds_clock_ratio;
	config |= SCDC_SCRAMBLING_ENABLE * scrambling;
	ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, config);
	if (ret < 0)
		NV_ERROR(drm, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n",
			 config, ret);
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* MST
|
|
|
|
*****************************************************************************/
|
2016-11-04 14:20:36 +07:00
|
|
|
/* container_of helpers for the MST master/connector/output types. */
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)

/* MST master: one per MST-capable SOR, owns the topology manager. */
struct nv50_mstm {
	struct nouveau_encoder *outp;	/* physical encoder carrying MST */

	struct drm_dp_mst_topology_mgr mgr;
	struct nv50_msto *msto[4];	/* one slot per head */

	bool modified;	/* needs (re)programming this commit */
	bool disabled;
	int links;	/* active link reference count */
};

/* MST connector: one per discovered topology port. */
struct nv50_mstc {
	struct nv50_mstm *mstm;
	struct drm_dp_mst_port *port;
	struct drm_connector connector;

	struct drm_display_mode *native;
	struct edid *edid;
};

/* MST output: a virtual encoder binding a head to an MST stream. */
struct nv50_msto {
	struct drm_encoder encoder;

	struct nv50_head *head;
	struct nv50_mstc *mstc;
	bool disabled;	/* pending cleanup in nv50_msto_cleanup() */
};
|
|
|
|
|
|
|
|
/* Look up the topology manager payload matching this MSTO's allocated
 * VCPI.  Caller must hold mgr.payload_lock (asserted below).
 *
 * Returns NULL when no payload carries the VCPI.
 */
static struct drm_dp_payload *
nv50_msto_payload(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	int vcpi = mstc->port->vcpi.vcpi, i;

	WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));

	/* Debug dump of the full payload table before searching it. */
	NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
	for (i = 0; i < mstm->mgr.max_payloads; i++) {
		struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
		NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
			  mstm->outp->base.base.name, i, payload->vcpi,
			  payload->start_slot, payload->num_slots);
	}

	for (i = 0; i < mstm->mgr.max_payloads; i++) {
		struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
		if (payload->vcpi == vcpi)
			return payload;
	}

	return NULL;
}
|
|
|
|
|
|
|
|
/* Post-commit cleanup for a disabled MSTO: free its VCPI allocation
 * and drop the head/connector bindings.  No-op unless the MSTO was
 * flagged disabled during this commit.
 */
static void
nv50_msto_cleanup(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	if (!msto->disabled)
		return;

	NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);

	drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);

	msto->mstc = NULL;
	msto->head = NULL;
	msto->disabled = false;
}
|
|
|
|
|
|
|
|
/* Program the SOR's VCPI (timeslot allocation) for this MSTO from the
 * topology manager's payload table.  When no VCPI is allocated, the
 * zero-initialized args effectively clear the allocation.
 *
 * Takes mgr.payload_lock to read the payload table consistently.
 */
static void
nv50_msto_prepare(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
		.base.hasht = mstm->outp->dcb->hasht,
		.base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
			      (0x0100 << msto->head->base.index),
	};

	mutex_lock(&mstm->mgr.payload_lock);

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
	if (mstc->port->vcpi.vcpi > 0) {
		struct drm_dp_payload *payload = nv50_msto_payload(msto);
		if (payload) {
			args.vcpi.start_slot = payload->start_slot;
			args.vcpi.num_slots = payload->num_slots;
			args.vcpi.pbn = mstc->port->vcpi.pbn;
			args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
		}
	}

	NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
		  msto->encoder.name, msto->head->base.base.name,
		  args.vcpi.start_slot, args.vcpi.num_slots,
		  args.vcpi.pbn, args.vcpi.aligned_pbn);

	nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
	mutex_unlock(&mstm->mgr.payload_lock);
}
|
|
|
|
|
|
|
|
/* Atomic check for an MST stream encoder: compute the required DP bandwidth
 * (PBN) for the mode, reserve VCPI slots when a modeset is needed, and
 * validate the mode against the connector's native mode.
 */
static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct nv50_mstm *mstm = mstc->mstm;
	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
	int slots;

	/* When restoring duplicated states, we need to make sure that the
	 * bw remains the same and avoid recalculating it, as the connector's
	 * bpc may have changed after the state was duplicated
	 */
	if (!state->duplicated)
		asyh->dp.pbn =
			drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
					     connector->display_info.bpc * 3);

	/* Only (re)reserve VCPI slots when a full modeset is required. */
	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
						      mstc->port,
						      asyh->dp.pbn);
		if (slots < 0)
			return slots;

		asyh->dp.tu = slots;
	}

	return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					   mstc->native);
}
|
|
|
|
|
|
|
|
/* Encoder enable: locate the connector routed to this stream encoder,
 * allocate its VCPI from the slots reserved during atomic check, and
 * program the head/output for the new stream.
 */
static void
nv50_msto_enable(struct drm_encoder *encoder)
{
	struct nv50_head *head = nv50_head(encoder->crtc);
	struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state);
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = NULL;
	struct nv50_mstm *mstm = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 proto, depth;
	bool r;

	/* Find the connector whose state selected this encoder. */
	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->best_encoder == &msto->encoder) {
			mstc = nv50_mstc(connector);
			mstm = mstc->mstm;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (WARN_ON(!mstc))
		return;

	/* NOTE(review): an allocation failure is only logged here; the
	 * stream is programmed regardless.
	 */
	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, armh->dp.pbn,
				     armh->dp.tu);
	if (!r)
		DRM_DEBUG_KMS("Failed to allocate VCPI\n");

	/* First active stream acquires the physical output. */
	if (!mstm->links++)
		nv50_outp_acquire(mstm->outp);

	/* Hardware protocol code, selected by which SOR link is in use. */
	if (mstm->outp->link & 1)
		proto = 0x8;
	else
		proto = 0x9;

	/* Hardware depth code from the sink's bits-per-component. */
	switch (mstc->connector.display_info.bpc) {
	case 6: depth = 0x2; break;
	case 8: depth = 0x5; break;
	case 10:
	default: depth = 0x6; break;
	}

	mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth);

	msto->head = head;
	msto->mstc = mstc;
	mstm->modified = true;
}
|
|
|
|
|
|
|
|
/* Encoder disable: zero the port's slot allocation and detach the stream
 * from the head.  The VCPI itself is freed later in nv50_msto_cleanup(),
 * after the payload table has been updated.
 */
static void
nv50_msto_disable(struct drm_encoder *encoder)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);

	mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
	mstm->modified = true;
	/* Last active stream: flag the output for release in
	 * nv50_mstm_prepare().
	 */
	if (!--mstm->links)
		mstm->disabled = true;
	msto->disabled = true;
}
|
|
|
|
|
|
|
|
/* Atomic encoder-helper vtable for MST stream encoders. */
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
	.disable = nv50_msto_disable,
	.enable = nv50_msto_enable,
	.atomic_check = nv50_msto_atomic_check,
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
nv50_msto_destroy(struct drm_encoder *encoder)
|
|
|
|
{
|
|
|
|
struct nv50_msto *msto = nv50_msto(encoder);
|
|
|
|
drm_encoder_cleanup(&msto->encoder);
|
|
|
|
kfree(msto);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Base encoder vtable for MST stream encoders. */
static const struct drm_encoder_funcs
nv50_msto = {
	.destroy = nv50_msto_destroy,
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
|
|
|
|
struct nv50_msto **pmsto)
|
|
|
|
{
|
|
|
|
struct nv50_msto *msto;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
|
|
|
|
DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
|
|
|
|
if (ret) {
|
|
|
|
kfree(*pmsto);
|
|
|
|
*pmsto = NULL;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
|
|
|
|
msto->encoder.possible_crtcs = heads;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct drm_encoder *
|
|
|
|
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
|
|
|
|
struct drm_connector_state *connector_state)
|
|
|
|
{
|
|
|
|
struct nv50_head *head = nv50_head(connector_state->crtc);
|
|
|
|
struct nv50_mstc *mstc = nv50_mstc(connector);
|
2018-10-09 06:24:31 +07:00
|
|
|
|
|
|
|
return &mstc->mstm->msto[head->base.index]->encoder;
|
2016-11-04 14:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Legacy (non-atomic) best_encoder hook; always returns the first stream
 * encoder.
 */
static struct drm_encoder *
nv50_mstc_best_encoder(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);

	return &mstc->mstm->msto[0]->encoder;
}
|
|
|
|
|
|
|
|
/* Accept every mode here; no bandwidth/clock validation is done at this
 * stage - feasibility is decided during atomic check via the VCPI slot
 * reservation.
 */
static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
		     struct drm_display_mode *mode)
{
	return MODE_OK;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_mstc_get_modes(struct drm_connector *connector)
|
|
|
|
{
|
|
|
|
struct nv50_mstc *mstc = nv50_mstc(connector);
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
|
2018-07-09 15:40:06 +07:00
|
|
|
drm_connector_update_edid_property(&mstc->connector, mstc->edid);
|
2017-11-01 21:21:02 +07:00
|
|
|
if (mstc->edid)
|
2016-11-04 14:20:36 +07:00
|
|
|
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
|
|
|
|
|
|
|
|
if (!mstc->connector.display_info.bpc)
|
|
|
|
mstc->connector.display_info.bpc = 8;
|
|
|
|
|
|
|
|
if (mstc->native)
|
|
|
|
drm_mode_destroy(mstc->connector.dev, mstc->native);
|
|
|
|
mstc->native = nouveau_conn_native_mode(&mstc->connector);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-01-11 07:53:43 +07:00
|
|
|
/* Connector-level atomic check: release this connector's VCPI slot
 * reservation when (and only when) the commit actually disables the CRTC
 * it was driving.
 */
static int
nv50_mstc_atomic_check(struct drm_connector *connector,
		       struct drm_connector_state *new_conn_state)
{
	struct drm_atomic_state *state = new_conn_state->state;
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
	struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(state, connector);
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *new_crtc = new_conn_state->crtc;

	/* Nothing was reserved if the connector had no CRTC before. */
	if (!old_conn_state->crtc)
		return 0;

	/* We only want to free VCPI if this state disables the CRTC on this
	 * connector
	 */
	if (new_crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);

		if (!crtc_state ||
		    !drm_atomic_crtc_needs_modeset(crtc_state) ||
		    crtc_state->enable)
			return 0;
	}

	return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/* Connector-helper vtable for MST connectors. */
static const struct drm_connector_helper_funcs
nv50_mstc_help = {
	.get_modes = nv50_mstc_get_modes,
	.mode_valid = nv50_mstc_mode_valid,
	.best_encoder = nv50_mstc_best_encoder,
	.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
	.atomic_check = nv50_mstc_atomic_check,
};
|
|
|
|
|
|
|
|
static enum drm_connector_status
|
|
|
|
nv50_mstc_detect(struct drm_connector *connector, bool force)
|
|
|
|
{
|
|
|
|
struct nv50_mstc *mstc = nv50_mstc(connector);
|
2018-09-15 03:44:03 +07:00
|
|
|
enum drm_connector_status conn_status;
|
|
|
|
int ret;
|
|
|
|
|
2019-01-11 07:53:38 +07:00
|
|
|
if (drm_connector_is_unregistered(connector))
|
2016-11-04 14:20:36 +07:00
|
|
|
return connector_status_disconnected;
|
2018-09-15 03:44:03 +07:00
|
|
|
|
|
|
|
ret = pm_runtime_get_sync(connector->dev->dev);
|
|
|
|
if (ret < 0 && ret != -EACCES)
|
|
|
|
return connector_status_disconnected;
|
|
|
|
|
|
|
|
conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
|
|
|
|
mstc->port);
|
|
|
|
|
|
|
|
pm_runtime_mark_last_busy(connector->dev->dev);
|
|
|
|
pm_runtime_put_autosuspend(connector->dev->dev);
|
|
|
|
return conn_status;
|
2016-11-04 14:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Connector .destroy: tear down DRM connector state and drop the malloc
 * reference on the MST port taken in nv50_mstc_new().
 */
static void
nv50_mstc_destroy(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);

	drm_connector_cleanup(&mstc->connector);

	drm_dp_mst_put_port_malloc(mstc->port);

	/* NOTE(review): mstc->edid (allocated by drm_dp_mst_get_edid() in
	 * nv50_mstc_get_modes()) is not freed here - confirm it is released
	 * elsewhere, otherwise this leaks on connector destruction.
	 */
	kfree(mstc);
}
|
|
|
|
|
|
|
|
/* Base connector vtable for MST connectors; state handling is shared with
 * the regular nouveau connector code.
 */
static const struct drm_connector_funcs
nv50_mstc = {
	.reset = nouveau_conn_reset,
	.detect = nv50_mstc_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = nv50_mstc_destroy,
	.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
	.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
	.atomic_set_property = nouveau_conn_atomic_set_property,
	.atomic_get_property = nouveau_conn_atomic_get_property,
};
|
|
|
|
|
|
|
|
/* Create a connector (nv50_mstc) for the given MST port at topology path
 * @path.  On success *pmstc is set and a malloc reference on the port is
 * held until nv50_mstc_destroy().  Returns 0 or a negative errno.
 */
static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
	      const char *path, struct nv50_mstc **pmstc)
{
	struct drm_device *dev = mstm->outp->base.base.dev;
	struct nv50_mstc *mstc;
	int ret, i;

	if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
		return -ENOMEM;
	mstc->mstm = mstm;
	mstc->port = port;

	ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		kfree(*pmstc);
		*pmstc = NULL;
		return ret;
	}

	drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);

	mstc->connector.funcs->reset(&mstc->connector);
	nouveau_conn_attach_properties(&mstc->connector);

	/* Attach every stream encoder created so far (array is filled from
	 * the front, so stop at the first NULL slot).
	 */
	for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
		drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);

	drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
	drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
	drm_connector_set_path_property(&mstc->connector, path);
	/* Keep the port's memory alive for the connector's lifetime. */
	drm_dp_mst_get_port_malloc(port);
	return 0;
}
|
|
|
|
|
|
|
|
/* Finish an atomic commit for this MST manager: wait for the sink to apply
 * the new payload table (ACT), send the part-2 payload update, then run
 * per-stream cleanup for every stream this commit touched.
 */
static void
nv50_mstm_cleanup(struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;
	int ret;

	NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
	/* NOTE(review): both return codes below are ignored - failures here
	 * are not propagated anywhere.
	 */
	ret = drm_dp_check_act_status(&mstm->mgr);

	ret = drm_dp_update_payload_part2(&mstm->mgr);

	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm)
				nv50_msto_cleanup(msto);
		}
	}

	mstm->modified = false;
}
|
|
|
|
|
|
|
|
/* Start an atomic commit for this MST manager: send the part-1 payload
 * table update, reprogram every stream belonging to this manager, and
 * release the physical output if the last stream was just disabled.
 */
static void
nv50_mstm_prepare(struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;
	int ret;

	NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
	/* NOTE(review): return code is ignored. */
	ret = drm_dp_update_payload_part1(&mstm->mgr);

	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm)
				nv50_msto_prepare(msto);
		}
	}

	/* Flagged by nv50_msto_disable() when the link count hit zero. */
	if (mstm->disabled) {
		if (!mstm->links)
			nv50_outp_release(mstm->outp);
		mstm->disabled = false;
	}
}
|
|
|
|
|
|
|
|
/* Topology-manager callback: an MST port went away - unregister its
 * connector, remove it from fbdev, and drop our reference.
 */
static void
nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_connector *connector)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nv50_mstc *mstc = nv50_mstc(connector);

	drm_connector_unregister(&mstc->connector);

	drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);

	/* Final teardown happens in nv50_mstc_destroy() once the last
	 * reference is gone.
	 */
	drm_connector_put(&mstc->connector);
}
|
|
|
|
|
|
|
|
/* Topology-manager callback: expose a freshly created MST connector to
 * fbdev and userspace.  No modesetting locks are required here.
 */
static void
nv50_mstm_register_connector(struct drm_connector *connector)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);

	drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);

	drm_connector_register(connector);
}
|
|
|
|
|
|
|
|
static struct drm_connector *
|
|
|
|
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
|
|
|
|
struct drm_dp_mst_port *port, const char *path)
|
|
|
|
{
|
|
|
|
struct nv50_mstm *mstm = nv50_mstm(mgr);
|
|
|
|
struct nv50_mstc *mstc;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = nv50_mstc_new(mstm, port, path, &mstc);
|
2019-01-11 07:53:35 +07:00
|
|
|
if (ret)
|
2016-11-04 14:20:36 +07:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return &mstc->connector;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Callbacks handed to the DP MST topology manager. */
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
	.add_connector = nv50_mstm_add_connector,
	.register_connector = nv50_mstm_register_connector,
	.destroy_connector = nv50_mstm_destroy_connector,
};
|
|
|
|
|
|
|
|
/* Service an MST sideband interrupt: read ESI events from the sink, feed
 * them to the topology manager, and ack them, looping while events keep
 * being handled.  Safe to call with mstm == NULL.
 */
void
nv50_mstm_service(struct nv50_mstm *mstm)
{
	struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
	bool handled = true;
	int ret;
	u8 esi[8] = {};

	if (!aux)
		return;

	while (handled) {
		ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
		if (ret != 8) {
			/* AUX read failed - assume the device is gone and
			 * tear the topology down.
			 */
			drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
			return;
		}

		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
		if (!handled)
			break;

		/* Ack the serviced event bytes back to the sink. */
		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
	}
}
|
|
|
|
|
|
|
|
void
|
|
|
|
nv50_mstm_remove(struct nv50_mstm *mstm)
|
|
|
|
{
|
|
|
|
if (mstm)
|
|
|
|
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
|
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/* Switch MST on or off for this output: program the sink's DP_MSTM_CTRL
 * register (DPCD revision permitting), then tell NVKM via the
 * SOR_DP_MST_LINK method.  @dpcd is the sink's DPCD revision byte.
 */
static int
nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
{
	struct nouveau_encoder *outp = mstm->outp;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_link_v0 mst;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
		.base.hasht = outp->dcb->hasht,
		.base.hashm = outp->dcb->hashm,
		.mst.state = state,
	};
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct nvif_object *disp = &drm->display->disp.object;
	int ret;

	/* The DPCD MST registers only exist on DP 1.2+ sinks. */
	if (dpcd >= 0x12) {
		/* Even if we're enabling MST, start with disabling the
		 * branching unit to clear any sink-side MST topology state
		 * that wasn't set by us
		 */
		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
		if (ret < 0)
			return ret;

		if (state) {
			/* Now, start initializing */
			ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
						 DP_MST_EN);
			if (ret < 0)
				return ret;
		}
	}

	return nvif_mthd(disp, 0, &args, sizeof(args));
}
|
|
|
|
|
|
|
|
int
|
|
|
|
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
	/* Probe/refresh the MST state of the sink behind this encoder.
	 *
	 * Only touches DP_MSTM_CTRL when the desired state actually differs
	 * from the current one: rewriting it on every long-HPD pulse has been
	 * observed to make some hubs (e.g. ThinkPad docks) drop ESIs or
	 * sideband messages mid-probe.
	 *
	 * Returns the resulting MST state (0/1) on success, or a negative
	 * error code if a DPCD access or the enable sequence failed.
	 */
	struct drm_dp_aux *aux;
	int ret;
	bool old_state, new_state;
	u8 mstm_ctrl;

	if (!mstm)
		return 0;

	mutex_lock(&mstm->mgr.lock);

	old_state = mstm->mgr.mst_state;
	new_state = old_state;
	aux = mstm->mgr.aux;

	if (old_state) {
		/* Just check that the MST hub is still as we expect it */
		ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
		if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
			DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
			new_state = false;
		}
	} else if (dpcd[0] >= 0x12) {
		/* DPCD 1.2+ sink: check the MST capability bit before
		 * deciding whether to honour 'allow'.
		 */
		ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
		if (ret < 0)
			goto probe_error;

		if (!(dpcd[1] & DP_MST_CAP))
			dpcd[0] = 0x11; /* treat as a pre-1.2, SST-only sink */
		else
			new_state = allow;
	}

	/* Nothing to change — leave the hub completely alone. */
	if (new_state == old_state) {
		mutex_unlock(&mstm->mgr.lock);
		return new_state;
	}

	ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
	if (ret)
		goto probe_error;

	mutex_unlock(&mstm->mgr.lock);

	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
	if (ret)
		/* Topology bring-up failed: undo the sink-side enable. */
		return nv50_mstm_enable(mstm, dpcd[0], 0);

	return new_state;

probe_error:
	mutex_unlock(&mstm->mgr.lock);
	return ret;
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
static void
|
|
|
|
nv50_mstm_fini(struct nv50_mstm *mstm)
|
|
|
|
{
|
|
|
|
if (mstm && mstm->mgr.mst_state)
|
|
|
|
drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nv50_mstm_init(struct nv50_mstm *mstm)
|
|
|
|
{
|
2018-11-15 08:39:51 +07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!mstm || !mstm->mgr.mst_state)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
|
|
|
|
if (ret == -1) {
|
|
|
|
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
|
|
|
|
drm_kms_helper_hotplug_event(mstm->mgr.dev);
|
|
|
|
}
|
2016-11-04 14:20:36 +07:00
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
static void
|
|
|
|
nv50_mstm_del(struct nv50_mstm **pmstm)
|
|
|
|
{
|
|
|
|
struct nv50_mstm *mstm = *pmstm;
|
|
|
|
if (mstm) {
|
2018-12-12 06:56:20 +07:00
|
|
|
drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
|
2016-11-04 14:20:36 +07:00
|
|
|
kfree(*pmstm);
|
|
|
|
*pmstm = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
|
|
|
|
int conn_base_id, struct nv50_mstm **pmstm)
|
|
|
|
{
|
|
|
|
const int max_payloads = hweight8(outp->dcb->heads);
|
|
|
|
struct drm_device *dev = outp->base.base.dev;
|
|
|
|
struct nv50_mstm *mstm;
|
2016-11-04 14:20:36 +07:00
|
|
|
int ret, i;
|
|
|
|
u8 dpcd;
|
|
|
|
|
|
|
|
/* This is a workaround for some monitors not functioning
|
|
|
|
* correctly in MST mode on initial module load. I think
|
|
|
|
* some bad interaction with the VBIOS may be responsible.
|
|
|
|
*
|
|
|
|
* A good ol' off and on again seems to work here ;)
|
|
|
|
*/
|
|
|
|
ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
|
|
|
|
if (ret >= 0 && dpcd >= 0x12)
|
|
|
|
drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
|
2016-11-04 14:20:36 +07:00
|
|
|
|
|
|
|
if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
|
|
|
|
return -ENOMEM;
|
|
|
|
mstm->outp = outp;
|
2016-11-04 14:20:36 +07:00
|
|
|
mstm->mgr.cbs = &nv50_mstm;
|
2016-11-04 14:20:36 +07:00
|
|
|
|
2017-01-25 06:49:29 +07:00
|
|
|
ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
|
2016-11-04 14:20:36 +07:00
|
|
|
max_payloads, conn_base_id);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
for (i = 0; i < max_payloads; i++) {
|
|
|
|
ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
|
|
|
|
i, &mstm->msto[i]);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-07-04 13:25:18 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* SOR
|
|
|
|
*****************************************************************************/
|
2012-03-12 12:23:44 +07:00
|
|
|
static void
|
2016-11-04 14:20:36 +07:00
|
|
|
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
|
2018-05-08 17:39:47 +07:00
|
|
|
struct nv50_head_atom *asyh, u8 proto, u8 depth)
|
2012-03-12 12:23:44 +07:00
|
|
|
{
|
2018-05-08 17:39:47 +07:00
|
|
|
struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
|
2018-05-08 17:39:47 +07:00
|
|
|
struct nv50_core *core = disp->core;
|
2016-11-04 14:20:36 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
if (!asyh) {
|
2016-11-04 14:20:36 +07:00
|
|
|
nv_encoder->ctrl &= ~BIT(head);
|
|
|
|
if (!(nv_encoder->ctrl & 0x0000000f))
|
|
|
|
nv_encoder->ctrl = 0;
|
|
|
|
} else {
|
|
|
|
nv_encoder->ctrl |= proto << 8;
|
|
|
|
nv_encoder->ctrl |= BIT(head);
|
2018-05-08 17:39:47 +07:00
|
|
|
asyh->or.depth = depth;
|
2016-11-04 14:20:36 +07:00
|
|
|
}
|
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
core->func->sor->ctrl(core, nv_encoder->or, nv_encoder->ctrl, asyh);
|
2014-06-05 07:59:55 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2016-11-04 14:20:36 +07:00
|
|
|
nv50_sor_disable(struct drm_encoder *encoder)
|
2014-06-05 07:59:55 +07:00
|
|
|
{
|
|
|
|
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
|
|
|
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
|
2012-11-16 08:40:34 +07:00
|
|
|
|
|
|
|
nv_encoder->crtc = NULL;
|
2014-06-05 07:59:55 +07:00
|
|
|
|
|
|
|
if (nv_crtc) {
|
2016-11-04 14:20:36 +07:00
|
|
|
struct nvkm_i2c_aux *aux = nv_encoder->aux;
|
|
|
|
u8 pwr;
|
|
|
|
|
|
|
|
if (aux) {
|
|
|
|
int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
|
|
|
|
if (ret == 0) {
|
|
|
|
pwr &= ~DP_SET_POWER_MASK;
|
|
|
|
pwr |= DP_SET_POWER_D3;
|
|
|
|
nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
|
2016-11-04 14:20:36 +07:00
|
|
|
nv50_audio_disable(encoder, nv_crtc);
|
|
|
|
nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
|
2017-05-19 20:59:35 +07:00
|
|
|
nv50_outp_release(nv_encoder);
|
2014-06-05 07:59:55 +07:00
|
|
|
}
|
2012-03-12 12:23:44 +07:00
|
|
|
}
|
|
|
|
|
2011-07-05 10:08:40 +07:00
|
|
|
static void
|
2016-11-04 14:20:36 +07:00
|
|
|
nv50_sor_enable(struct drm_encoder *encoder)
|
2011-07-05 10:08:40 +07:00
|
|
|
{
|
2014-08-10 01:10:27 +07:00
|
|
|
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
|
|
|
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
|
2018-05-08 17:39:47 +07:00
|
|
|
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
|
|
|
|
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
|
2014-08-10 01:10:27 +07:00
|
|
|
struct {
|
|
|
|
struct nv50_disp_mthd_v1 base;
|
|
|
|
struct nv50_disp_sor_lvds_script_v0 lvds;
|
|
|
|
} lvds = {
|
|
|
|
.base.version = 1,
|
|
|
|
.base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
|
|
|
|
.base.hasht = nv_encoder->dcb->hasht,
|
|
|
|
.base.hashm = nv_encoder->dcb->hashm,
|
|
|
|
};
|
2012-11-21 11:40:21 +07:00
|
|
|
struct nv50_disp *disp = nv50_disp(encoder->dev);
|
2011-11-11 15:13:13 +07:00
|
|
|
struct drm_device *dev = encoder->dev;
|
2012-07-31 13:16:21 +07:00
|
|
|
struct nouveau_drm *drm = nouveau_drm(dev);
|
2011-07-08 09:52:14 +07:00
|
|
|
struct nouveau_connector *nv_connector;
|
2012-07-31 13:16:21 +07:00
|
|
|
struct nvbios *bios = &drm->vbios;
|
2012-11-16 08:40:34 +07:00
|
|
|
u8 proto = 0xf;
|
|
|
|
u8 depth = 0x0;
|
2011-07-05 10:08:40 +07:00
|
|
|
|
2011-07-08 09:52:14 +07:00
|
|
|
nv_connector = nouveau_encoder_connector_get(nv_encoder);
|
2014-06-05 07:59:55 +07:00
|
|
|
nv_encoder->crtc = encoder->crtc;
|
2017-05-19 20:59:35 +07:00
|
|
|
nv50_outp_acquire(nv_encoder);
|
2014-06-05 07:59:55 +07:00
|
|
|
|
2011-07-08 09:52:14 +07:00
|
|
|
switch (nv_encoder->dcb->type) {
|
2012-07-11 07:44:20 +07:00
|
|
|
case DCB_OUTPUT_TMDS:
|
2017-05-19 20:59:35 +07:00
|
|
|
if (nv_encoder->link & 1) {
|
2015-11-04 09:00:10 +07:00
|
|
|
proto = 0x1;
|
|
|
|
/* Only enable dual-link if:
|
|
|
|
* - Need to (i.e. rate > 165MHz)
|
|
|
|
* - DCB says we can
|
|
|
|
* - Not an HDMI monitor, since there's no dual-link
|
|
|
|
* on HDMI.
|
|
|
|
*/
|
|
|
|
if (mode->clock >= 165000 &&
|
|
|
|
nv_encoder->dcb->duallink_possible &&
|
|
|
|
!drm_detect_hdmi_monitor(nv_connector->edid))
|
|
|
|
proto |= 0x4;
|
2011-07-08 09:52:14 +07:00
|
|
|
} else {
|
2012-11-16 08:40:34 +07:00
|
|
|
proto = 0x2;
|
2011-07-08 09:52:14 +07:00
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
nv50_hdmi_enable(&nv_encoder->base.base, mode);
|
2011-07-08 09:52:14 +07:00
|
|
|
break;
|
2012-07-11 07:44:20 +07:00
|
|
|
case DCB_OUTPUT_LVDS:
|
2012-11-16 08:40:34 +07:00
|
|
|
proto = 0x0;
|
|
|
|
|
2011-07-08 09:52:14 +07:00
|
|
|
if (bios->fp_no_ddc) {
|
|
|
|
if (bios->fp.dual_link)
|
2014-08-10 01:10:27 +07:00
|
|
|
lvds.lvds.script |= 0x0100;
|
2011-07-08 09:52:14 +07:00
|
|
|
if (bios->fp.if_is_24bit)
|
2014-08-10 01:10:27 +07:00
|
|
|
lvds.lvds.script |= 0x0200;
|
2011-07-08 09:52:14 +07:00
|
|
|
} else {
|
2011-11-18 07:23:59 +07:00
|
|
|
if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
|
2011-07-08 09:52:14 +07:00
|
|
|
if (((u8 *)nv_connector->edid)[121] == 2)
|
2014-08-10 01:10:27 +07:00
|
|
|
lvds.lvds.script |= 0x0100;
|
2011-07-08 09:52:14 +07:00
|
|
|
} else
|
|
|
|
if (mode->clock >= bios->fp.duallink_transition_clk) {
|
2014-08-10 01:10:27 +07:00
|
|
|
lvds.lvds.script |= 0x0100;
|
2011-07-08 09:52:14 +07:00
|
|
|
}
|
2011-07-05 10:08:40 +07:00
|
|
|
|
2014-08-10 01:10:27 +07:00
|
|
|
if (lvds.lvds.script & 0x0100) {
|
2011-07-08 09:52:14 +07:00
|
|
|
if (bios->fp.strapless_is_24bit & 2)
|
2014-08-10 01:10:27 +07:00
|
|
|
lvds.lvds.script |= 0x0200;
|
2011-07-08 09:52:14 +07:00
|
|
|
} else {
|
|
|
|
if (bios->fp.strapless_is_24bit & 1)
|
2014-08-10 01:10:27 +07:00
|
|
|
lvds.lvds.script |= 0x0200;
|
2011-07-08 09:52:14 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
if (nv_connector->base.display_info.bpc == 8)
|
2014-08-10 01:10:27 +07:00
|
|
|
lvds.lvds.script |= 0x0200;
|
2011-07-08 09:52:14 +07:00
|
|
|
}
|
2012-11-09 08:25:37 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
|
2011-07-08 09:52:14 +07:00
|
|
|
break;
|
2012-07-11 07:44:20 +07:00
|
|
|
case DCB_OUTPUT_DP:
|
2016-11-04 14:20:36 +07:00
|
|
|
if (nv_connector->base.display_info.bpc == 6)
|
2012-11-16 08:40:34 +07:00
|
|
|
depth = 0x2;
|
2016-11-04 14:20:36 +07:00
|
|
|
else
|
|
|
|
if (nv_connector->base.display_info.bpc == 8)
|
2012-11-16 08:40:34 +07:00
|
|
|
depth = 0x5;
|
2016-11-04 14:20:36 +07:00
|
|
|
else
|
2012-11-21 11:49:54 +07:00
|
|
|
depth = 0x6;
|
2012-03-10 22:28:48 +07:00
|
|
|
|
2017-05-19 20:59:35 +07:00
|
|
|
if (nv_encoder->link & 1)
|
2012-11-16 08:40:34 +07:00
|
|
|
proto = 0x8;
|
2012-03-10 22:28:48 +07:00
|
|
|
else
|
2012-11-16 08:40:34 +07:00
|
|
|
proto = 0x9;
|
2016-11-04 14:20:36 +07:00
|
|
|
|
|
|
|
nv50_audio_enable(encoder, mode);
|
2012-03-10 22:28:48 +07:00
|
|
|
break;
|
2011-07-08 09:52:14 +07:00
|
|
|
default:
|
2016-03-03 09:56:33 +07:00
|
|
|
BUG();
|
2011-07-08 09:52:14 +07:00
|
|
|
break;
|
|
|
|
}
|
2011-07-08 08:53:37 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
|
2011-07-05 10:08:40 +07:00
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
static const struct drm_encoder_helper_funcs
|
|
|
|
nv50_sor_help = {
|
2016-11-04 14:20:36 +07:00
|
|
|
.atomic_check = nv50_outp_atomic_check,
|
|
|
|
.enable = nv50_sor_enable,
|
|
|
|
.disable = nv50_sor_disable,
|
2016-11-04 14:20:36 +07:00
|
|
|
};
|
|
|
|
|
2011-07-05 10:08:40 +07:00
|
|
|
static void
|
2012-11-21 11:40:21 +07:00
|
|
|
nv50_sor_destroy(struct drm_encoder *encoder)
|
2011-07-05 10:08:40 +07:00
|
|
|
{
|
2016-11-04 14:20:36 +07:00
|
|
|
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
|
|
|
nv50_mstm_del(&nv_encoder->dp.mstm);
|
2011-07-05 10:08:40 +07:00
|
|
|
drm_encoder_cleanup(encoder);
|
|
|
|
kfree(encoder);
|
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
static const struct drm_encoder_funcs
|
|
|
|
nv50_sor_func = {
|
2012-11-21 11:40:21 +07:00
|
|
|
.destroy = nv50_sor_destroy,
|
2011-07-05 10:08:40 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
static int
|
2012-11-21 11:40:21 +07:00
|
|
|
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
2011-07-05 10:08:40 +07:00
|
|
|
{
|
2016-11-04 14:20:36 +07:00
|
|
|
struct nouveau_connector *nv_connector = nouveau_connector(connector);
|
2013-02-11 17:15:03 +07:00
|
|
|
struct nouveau_drm *drm = nouveau_drm(connector->dev);
|
2018-05-08 17:39:47 +07:00
|
|
|
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
|
2016-05-18 10:57:42 +07:00
|
|
|
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
|
2011-07-05 10:08:40 +07:00
|
|
|
struct nouveau_encoder *nv_encoder;
|
|
|
|
struct drm_encoder *encoder;
|
2018-05-08 17:39:47 +07:00
|
|
|
u8 ver, hdr, cnt, len;
|
|
|
|
u32 data;
|
2016-11-04 14:20:36 +07:00
|
|
|
int type, ret;
|
2013-02-11 17:15:03 +07:00
|
|
|
|
|
|
|
switch (dcbe->type) {
|
|
|
|
case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
|
|
|
|
case DCB_OUTPUT_TMDS:
|
|
|
|
case DCB_OUTPUT_DP:
|
|
|
|
default:
|
|
|
|
type = DRM_MODE_ENCODER_TMDS;
|
|
|
|
break;
|
|
|
|
}
|
2011-07-05 10:08:40 +07:00
|
|
|
|
|
|
|
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
|
|
|
|
if (!nv_encoder)
|
|
|
|
return -ENOMEM;
|
|
|
|
nv_encoder->dcb = dcbe;
|
2016-11-04 14:20:36 +07:00
|
|
|
nv_encoder->update = nv50_sor_update;
|
2011-07-05 10:08:40 +07:00
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
encoder = to_drm_encoder(nv_encoder);
|
|
|
|
encoder->possible_crtcs = dcbe->heads;
|
|
|
|
encoder->possible_clones = 0;
|
2016-11-04 14:20:36 +07:00
|
|
|
drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
|
|
|
|
"sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
|
2016-11-04 14:20:36 +07:00
|
|
|
drm_encoder_helper_add(encoder, &nv50_sor_help);
|
2016-11-04 14:20:36 +07:00
|
|
|
|
2018-07-09 15:40:07 +07:00
|
|
|
drm_connector_attach_encoder(connector, encoder);
|
2016-11-04 14:20:36 +07:00
|
|
|
|
2015-08-20 11:54:15 +07:00
|
|
|
if (dcbe->type == DCB_OUTPUT_DP) {
|
2017-07-19 13:49:59 +07:00
|
|
|
struct nv50_disp *disp = nv50_disp(encoder->dev);
|
2015-08-20 11:54:15 +07:00
|
|
|
struct nvkm_i2c_aux *aux =
|
|
|
|
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
|
|
|
|
if (aux) {
|
2018-05-08 17:39:47 +07:00
|
|
|
if (disp->disp->object.oclass < GF110_DISP) {
|
2017-07-19 13:49:59 +07:00
|
|
|
/* HW has no support for address-only
|
|
|
|
* transactions, so we're required to
|
|
|
|
* use custom I2C-over-AUX code.
|
|
|
|
*/
|
|
|
|
nv_encoder->i2c = &aux->i2c;
|
|
|
|
} else {
|
|
|
|
nv_encoder->i2c = &nv_connector->aux.ddc;
|
|
|
|
}
|
2015-08-20 11:54:15 +07:00
|
|
|
nv_encoder->aux = aux;
|
|
|
|
}
|
2016-11-04 14:20:36 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
if ((data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) &&
|
|
|
|
ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) {
|
2016-11-04 14:20:36 +07:00
|
|
|
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
|
|
|
|
nv_connector->base.base.id,
|
|
|
|
&nv_encoder->dp.mstm);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2015-08-20 11:54:15 +07:00
|
|
|
} else {
|
|
|
|
struct nvkm_i2c_bus *bus =
|
|
|
|
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
|
|
|
|
if (bus)
|
|
|
|
nv_encoder->i2c = &bus->i2c;
|
|
|
|
}
|
|
|
|
|
2011-07-05 10:08:40 +07:00
|
|
|
return 0;
|
|
|
|
}
|
2011-07-04 13:25:18 +07:00
|
|
|
|
2013-02-11 06:52:58 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* PIOR
|
|
|
|
*****************************************************************************/
|
2016-11-04 14:20:36 +07:00
|
|
|
static int
|
|
|
|
nv50_pior_atomic_check(struct drm_encoder *encoder,
|
|
|
|
struct drm_crtc_state *crtc_state,
|
|
|
|
struct drm_connector_state *conn_state)
|
2013-02-11 06:52:58 +07:00
|
|
|
{
|
2016-11-04 14:20:36 +07:00
|
|
|
int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
crtc_state->adjusted_mode.clock *= 2;
|
|
|
|
return 0;
|
2013-02-11 06:52:58 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2016-11-04 14:20:36 +07:00
|
|
|
nv50_pior_disable(struct drm_encoder *encoder)
|
2013-02-11 06:52:58 +07:00
|
|
|
{
|
2016-11-04 14:20:36 +07:00
|
|
|
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
2018-05-08 17:39:47 +07:00
|
|
|
struct nv50_core *core = nv50_disp(encoder->dev)->core;
|
|
|
|
if (nv_encoder->crtc)
|
|
|
|
core->func->pior->ctrl(core, nv_encoder->or, 0x00000000, NULL);
|
2016-11-04 14:20:36 +07:00
|
|
|
nv_encoder->crtc = NULL;
|
2017-05-19 20:59:35 +07:00
|
|
|
nv50_outp_release(nv_encoder);
|
2013-02-11 06:52:58 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2016-11-04 14:20:36 +07:00
|
|
|
nv50_pior_enable(struct drm_encoder *encoder)
|
2013-02-11 06:52:58 +07:00
|
|
|
{
|
|
|
|
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
|
|
|
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
|
|
|
|
struct nouveau_connector *nv_connector;
|
2018-05-08 17:39:47 +07:00
|
|
|
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
|
2018-05-08 17:39:47 +07:00
|
|
|
struct nv50_core *core = nv50_disp(encoder->dev)->core;
|
2013-02-11 06:52:58 +07:00
|
|
|
u8 owner = 1 << nv_crtc->index;
|
2018-05-08 17:39:47 +07:00
|
|
|
u8 proto;
|
2013-02-11 06:52:58 +07:00
|
|
|
|
2017-05-19 20:59:35 +07:00
|
|
|
nv50_outp_acquire(nv_encoder);
|
|
|
|
|
2013-02-11 06:52:58 +07:00
|
|
|
nv_connector = nouveau_encoder_connector_get(nv_encoder);
|
|
|
|
switch (nv_connector->base.display_info.bpc) {
|
2018-05-08 17:39:47 +07:00
|
|
|
case 10: asyh->or.depth = 0x6; break;
|
|
|
|
case 8: asyh->or.depth = 0x5; break;
|
|
|
|
case 6: asyh->or.depth = 0x2; break;
|
|
|
|
default: asyh->or.depth = 0x0; break;
|
2013-02-11 06:52:58 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
switch (nv_encoder->dcb->type) {
|
|
|
|
case DCB_OUTPUT_TMDS:
|
|
|
|
case DCB_OUTPUT_DP:
|
|
|
|
proto = 0x0;
|
|
|
|
break;
|
|
|
|
default:
|
2016-03-03 09:56:33 +07:00
|
|
|
BUG();
|
2013-02-11 06:52:58 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
core->func->pior->ctrl(core, nv_encoder->or, (proto << 8) | owner, asyh);
|
2013-02-11 06:52:58 +07:00
|
|
|
nv_encoder->crtc = encoder->crtc;
|
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/* Encoder helper callbacks for PIOR (external encoder) outputs. */
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
	.atomic_check = nv50_pior_atomic_check,
	.enable = nv50_pior_enable,
	.disable = nv50_pior_disable,
};
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/* Tear down a PIOR encoder: unregister it from DRM and free the
 * containing nouveau_encoder allocated by nv50_pior_create().
 * (The drm_encoder is embedded in nouveau_encoder, so freeing the
 * encoder pointer releases the whole object.)
 */
static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
|
|
|
|
|
|
|
|
/* Base encoder callbacks for PIOR outputs. */
static const struct drm_encoder_funcs
nv50_pior_func = {
	.destroy = nv50_pior_destroy,
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
|
|
|
{
|
|
|
|
struct nouveau_drm *drm = nouveau_drm(connector->dev);
|
2016-05-18 10:57:42 +07:00
|
|
|
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
|
2015-08-20 11:54:15 +07:00
|
|
|
struct nvkm_i2c_bus *bus = NULL;
|
|
|
|
struct nvkm_i2c_aux *aux = NULL;
|
|
|
|
struct i2c_adapter *ddc;
|
2013-02-11 06:52:58 +07:00
|
|
|
struct nouveau_encoder *nv_encoder;
|
|
|
|
struct drm_encoder *encoder;
|
|
|
|
int type;
|
|
|
|
|
|
|
|
switch (dcbe->type) {
|
|
|
|
case DCB_OUTPUT_TMDS:
|
2015-08-20 11:54:15 +07:00
|
|
|
bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
|
|
|
|
ddc = bus ? &bus->i2c : NULL;
|
2013-02-11 06:52:58 +07:00
|
|
|
type = DRM_MODE_ENCODER_TMDS;
|
|
|
|
break;
|
|
|
|
case DCB_OUTPUT_DP:
|
2015-08-20 11:54:15 +07:00
|
|
|
aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
|
2018-05-08 17:39:47 +07:00
|
|
|
ddc = aux ? &aux->i2c : NULL;
|
2013-02-11 06:52:58 +07:00
|
|
|
type = DRM_MODE_ENCODER_TMDS;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
|
|
|
|
if (!nv_encoder)
|
|
|
|
return -ENOMEM;
|
|
|
|
nv_encoder->dcb = dcbe;
|
|
|
|
nv_encoder->i2c = ddc;
|
2015-08-20 11:54:15 +07:00
|
|
|
nv_encoder->aux = aux;
|
2013-02-11 06:52:58 +07:00
|
|
|
|
|
|
|
encoder = to_drm_encoder(nv_encoder);
|
|
|
|
encoder->possible_crtcs = dcbe->heads;
|
|
|
|
encoder->possible_clones = 0;
|
2016-11-04 14:20:36 +07:00
|
|
|
drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
|
|
|
|
"pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
|
2016-11-04 14:20:36 +07:00
|
|
|
drm_encoder_helper_add(encoder, &nv50_pior_help);
|
2013-02-11 06:52:58 +07:00
|
|
|
|
2018-07-09 15:40:07 +07:00
|
|
|
drm_connector_attach_encoder(connector, encoder);
|
2013-02-11 06:52:58 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* Atomic
|
|
|
|
*****************************************************************************/
|
|
|
|
|
|
|
|
/* Push buffered state to the core (master) display channel and wait for
 * the hardware to process it.
 *
 * MST stream configuration must be written before the core update is
 * kicked and finalised only after it completes, hence the two encoder
 * walks bracketing the update.  Completion is detected via the core
 * channel notifier; a timeout is logged but otherwise not fatal.
 */
static void
nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
	struct nouveau_drm *drm = nouveau_drm(state->dev);
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_core *core = disp->core;
	struct nv50_mstm *mstm;
	struct drm_encoder *encoder;

	NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);

	/* Prepare any modified MST topology managers before the update. */
	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_prepare(mstm);
		}
	}

	/* Arm the completion notifier, kick the update, and wait for it. */
	core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
	core->func->update(core, interlock, true);
	if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
				       disp->core->chan.base.device))
		NV_ERROR(drm, "core notifier timeout\n");

	/* Finish MST bookkeeping now that the update has landed. */
	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_cleanup(mstm);
		}
	}
}
|
|
|
|
|
2018-07-03 07:52:34 +07:00
|
|
|
static void
|
|
|
|
nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
|
|
|
|
{
|
|
|
|
struct drm_plane_state *new_plane_state;
|
|
|
|
struct drm_plane *plane;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
|
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
|
|
|
if (interlock[wndw->interlock.type] & wndw->interlock.data) {
|
|
|
|
if (wndw->func->update)
|
|
|
|
wndw->func->update(wndw, interlock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
/* The heart of the atomic commit: apply @state to the hardware.
 *
 * The sequence is strictly ordered: disable heads, windows and output
 * paths first (flushing through the core channel where an MST path
 * requires it), then re-enable outputs, heads and windows, and finally
 * flush the update and wait for the hardware to signal completion.
 * Runs either synchronously from nv50_disp_atomic_commit() or from the
 * nonblocking commit worker; drops the state reference taken at queue
 * time via drm_atomic_state_put() at the end.
 */
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct drm_crtc *crtc;
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_outp_atom *outp, *outt;
	u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
	int i;

	NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
	drm_atomic_helper_wait_for_fences(dev, state, false);
	drm_atomic_helper_wait_for_dependencies(state);
	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	/* Serialise against other users of the core channel if the check
	 * phase flagged output-path changes (atom->lock_core).
	 */
	if (atom->lock_core)
		mutex_lock(&disp->mutex);

	/* Disable head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
			  asyh->clr.mask, asyh->set.mask);
		if (old_crtc_state->active && !new_crtc_state->active)
			drm_crtc_vblank_off(crtc);

		if (asyh->clr.mask) {
			nv50_head_flush_clr(head, asyh, atom->flush_disable);
			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
		}
	}

	/* Disable plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
			  asyw->clr.mask, asyw->set.mask);
		if (!asyw->clr.mask)
			continue;

		nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
	}

	/* Disable output path(s). */
	list_for_each_entry(outp, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
			  outp->clr.mask, outp->set.mask);

		if (outp->clr.mask) {
			help->disable(encoder);
			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
			/* MST paths require the disable to reach the HW
			 * immediately (outp->flush_disable set at check
			 * time), so flush through the core channel now.
			 */
			if (outp->flush_disable) {
				nv50_disp_atomic_commit_wndw(state, interlock);
				nv50_disp_atomic_commit_core(state, interlock);
				memset(interlock, 0x00, sizeof(interlock));
			}
		}
	}

	/* Flush disable. */
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		if (atom->flush_disable) {
			nv50_disp_atomic_commit_wndw(state, interlock);
			nv50_disp_atomic_commit_core(state, interlock);
			memset(interlock, 0x00, sizeof(interlock));
		}
	}

	/* Update output path(s); each record is consumed (freed) here. */
	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
			  outp->set.mask, outp->clr.mask);

		if (outp->set.mask) {
			help->enable(encoder);
			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
		}

		list_del(&outp->head);
		kfree(outp);
	}

	/* Update head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
			  asyh->set.mask, asyh->clr.mask);

		if (asyh->set.mask) {
			nv50_head_flush_set(head, asyh);
			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
		}

		if (new_crtc_state->active) {
			if (!old_crtc_state->active)
				drm_crtc_vblank_on(crtc);
			/* Hold a vblank ref until the event is sent below. */
			if (new_crtc_state->event)
				drm_crtc_vblank_get(crtc);
		}
	}

	/* Update plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
			  asyw->set.mask, asyw->clr.mask);
		if ( !asyw->set.mask &&
		    (!asyw->clr.mask || atom->flush_disable))
			continue;

		nv50_wndw_flush_set(wndw, interlock, asyw);
	}

	/* Flush update. */
	nv50_disp_atomic_commit_wndw(state, interlock);

	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		/* Legacy cursor-only updates with no other channel pending
		 * can skip the (slow, notifier-synchronised) core commit.
		 */
		if (interlock[NV50_DISP_INTERLOCK_BASE] ||
		    interlock[NV50_DISP_INTERLOCK_OVLY] ||
		    interlock[NV50_DISP_INTERLOCK_WNDW] ||
		    !atom->state.legacy_cursor_update)
			nv50_disp_atomic_commit_core(state, interlock);
		else
			disp->core->func->update(disp->core, interlock, false);
	}

	if (atom->lock_core)
		mutex_unlock(&disp->mutex);

	/* Wait for HW to signal completion. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);
		int ret = nv50_wndw_wait_armed(wndw, asyw);
		if (ret)
			NV_ERROR(drm, "%s: timeout\n", plane->name);
	}

	/* Deliver any pending page-flip/vblank events. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event) {
			unsigned long flags;
			/* Get correct count/ts if racing with vblank irq */
			if (new_crtc_state->active)
				drm_crtc_accurate_vblank_count(crtc);
			spin_lock_irqsave(&crtc->dev->event_lock, flags);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

			new_crtc_state->event = NULL;
			/* Drop the ref taken alongside vblank_get() above. */
			if (new_crtc_state->active)
				drm_crtc_vblank_put(crtc);
		}
	}

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);
}
|
|
|
|
|
|
|
|
/* Worker for nonblocking commits: runs the commit tail outside the
 * caller's context.  The state reference taken before queueing (see
 * nv50_disp_atomic_commit()) is dropped inside the tail.
 */
static void
nv50_disp_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, typeof(*state), commit_work);
	nv50_disp_atomic_commit_tail(state);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_disp_atomic_commit(struct drm_device *dev,
|
|
|
|
struct drm_atomic_state *state, bool nonblock)
|
|
|
|
{
|
|
|
|
struct nouveau_drm *drm = nouveau_drm(dev);
|
2017-11-01 06:12:25 +07:00
|
|
|
struct drm_plane_state *new_plane_state;
|
2016-11-04 14:20:36 +07:00
|
|
|
struct drm_plane *plane;
|
|
|
|
struct drm_crtc *crtc;
|
|
|
|
bool active = false;
|
|
|
|
int ret, i;
|
|
|
|
|
|
|
|
ret = pm_runtime_get_sync(dev->dev);
|
|
|
|
if (ret < 0 && ret != -EACCES)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ret = drm_atomic_helper_setup_commit(state, nonblock);
|
|
|
|
if (ret)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
|
|
|
|
|
|
|
|
ret = drm_atomic_helper_prepare_planes(dev, state);
|
|
|
|
if (ret)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
if (!nonblock) {
|
|
|
|
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
|
|
|
|
if (ret)
|
2017-07-11 21:33:03 +07:00
|
|
|
goto err_cleanup;
|
2016-11-04 14:20:36 +07:00
|
|
|
}
|
|
|
|
|
2017-07-11 21:33:05 +07:00
|
|
|
ret = drm_atomic_helper_swap_state(state, true);
|
|
|
|
if (ret)
|
|
|
|
goto err_cleanup;
|
|
|
|
|
2017-11-01 06:12:25 +07:00
|
|
|
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
|
|
|
|
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
|
2016-11-04 14:20:36 +07:00
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
2017-07-19 21:39:19 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
if (asyw->set.image)
|
|
|
|
nv50_wndw_ntfy_enable(wndw, asyw);
|
2016-11-04 14:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
drm_atomic_state_get(state);
|
|
|
|
|
|
|
|
if (nonblock)
|
|
|
|
queue_work(system_unbound_wq, &state->commit_work);
|
|
|
|
else
|
|
|
|
nv50_disp_atomic_commit_tail(state);
|
|
|
|
|
|
|
|
drm_for_each_crtc(crtc, dev) {
|
2018-07-13 00:02:53 +07:00
|
|
|
if (crtc->state->active) {
|
2016-11-04 14:20:36 +07:00
|
|
|
if (!drm->have_disp_power_ref) {
|
|
|
|
drm->have_disp_power_ref = true;
|
2017-07-11 21:33:03 +07:00
|
|
|
return 0;
|
2016-11-04 14:20:36 +07:00
|
|
|
}
|
|
|
|
active = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!active && drm->have_disp_power_ref) {
|
|
|
|
pm_runtime_put_autosuspend(dev->dev);
|
|
|
|
drm->have_disp_power_ref = false;
|
|
|
|
}
|
|
|
|
|
2017-07-11 21:33:03 +07:00
|
|
|
err_cleanup:
|
|
|
|
if (ret)
|
|
|
|
drm_atomic_helper_cleanup_planes(dev, state);
|
2016-11-04 14:20:36 +07:00
|
|
|
done:
|
|
|
|
pm_runtime_put_autosuspend(dev->dev);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct nv50_outp_atom *
|
|
|
|
nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
|
|
|
|
{
|
|
|
|
struct nv50_outp_atom *outp;
|
|
|
|
|
|
|
|
list_for_each_entry(outp, &atom->outp, head) {
|
|
|
|
if (outp->encoder == encoder)
|
|
|
|
return outp;
|
|
|
|
}
|
|
|
|
|
|
|
|
outp = kzalloc(sizeof(*outp), GFP_KERNEL);
|
|
|
|
if (!outp)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
list_add(&outp->head, &atom->outp);
|
|
|
|
outp->encoder = encoder;
|
|
|
|
return outp;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
|
2017-07-19 21:39:19 +07:00
|
|
|
struct drm_connector_state *old_connector_state)
|
2016-11-04 14:20:36 +07:00
|
|
|
{
|
2017-07-19 21:39:19 +07:00
|
|
|
struct drm_encoder *encoder = old_connector_state->best_encoder;
|
|
|
|
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
|
2016-11-04 14:20:36 +07:00
|
|
|
struct drm_crtc *crtc;
|
|
|
|
struct nv50_outp_atom *outp;
|
|
|
|
|
2017-07-19 21:39:19 +07:00
|
|
|
if (!(crtc = old_connector_state->crtc))
|
2016-11-04 14:20:36 +07:00
|
|
|
return 0;
|
|
|
|
|
2017-07-19 21:39:19 +07:00
|
|
|
old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
|
|
|
|
new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
|
|
|
|
if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
|
2016-11-04 14:20:36 +07:00
|
|
|
outp = nv50_disp_outp_atomic_add(atom, encoder);
|
|
|
|
if (IS_ERR(outp))
|
|
|
|
return PTR_ERR(outp);
|
|
|
|
|
|
|
|
if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
|
|
|
|
outp->flush_disable = true;
|
|
|
|
atom->flush_disable = true;
|
|
|
|
}
|
|
|
|
outp->clr.ctrl = true;
|
|
|
|
atom->lock_core = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
|
|
|
|
struct drm_connector_state *connector_state)
|
|
|
|
{
|
|
|
|
struct drm_encoder *encoder = connector_state->best_encoder;
|
2017-07-19 21:39:19 +07:00
|
|
|
struct drm_crtc_state *new_crtc_state;
|
2016-11-04 14:20:36 +07:00
|
|
|
struct drm_crtc *crtc;
|
|
|
|
struct nv50_outp_atom *outp;
|
|
|
|
|
|
|
|
if (!(crtc = connector_state->crtc))
|
|
|
|
return 0;
|
|
|
|
|
2017-07-19 21:39:19 +07:00
|
|
|
new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
|
|
|
|
if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
|
2016-11-04 14:20:36 +07:00
|
|
|
outp = nv50_disp_outp_atomic_add(atom, encoder);
|
|
|
|
if (IS_ERR(outp))
|
|
|
|
return PTR_ERR(outp);
|
|
|
|
|
|
|
|
outp->set.ctrl = true;
|
|
|
|
atom->lock_core = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Atomic check (drm_mode_config_funcs.atomic_check): run the generic DRM
 * checks, then build the driver-private list of output paths that must
 * be disabled/enabled during commit, and validate MST bandwidth.
 *
 * Returns 0 if @state is committable, otherwise a negative error code.
 */
static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	struct drm_connector_state *old_connector_state, *new_connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	/* We need to handle colour management on a per-plane basis. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->color_mgmt_changed) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				return ret;
		}
	}

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* Record output paths to clear (old state) and set (new state). */
	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
		if (ret)
			return ret;

		ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
		if (ret)
			return ret;
	}

	ret = drm_dp_mst_atomic_check(state);
	if (ret)
		return ret;

	return 0;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
|
|
|
|
{
|
|
|
|
struct nv50_atom *atom = nv50_atom(state);
|
|
|
|
struct nv50_outp_atom *outp, *outt;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
|
|
|
|
list_del(&outp->head);
|
|
|
|
kfree(outp);
|
|
|
|
}
|
|
|
|
|
|
|
|
drm_atomic_state_default_clear(state);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Free the driver-private atomic state allocated by
 * nv50_disp_atomic_state_alloc().
 */
static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	drm_atomic_state_default_release(&atom->state);
	kfree(atom);
}
|
|
|
|
|
|
|
|
static struct drm_atomic_state *
|
|
|
|
nv50_disp_atomic_state_alloc(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
struct nv50_atom *atom;
|
|
|
|
if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
|
|
|
|
drm_atomic_state_init(dev, &atom->state) < 0) {
|
|
|
|
kfree(atom);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
INIT_LIST_HEAD(&atom->outp);
|
|
|
|
return &atom->state;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Mode-config callbacks wiring the nv50+ display into the DRM atomic
 * framework, using nouveau-private state objects (nv50_atom) to carry
 * output-path bookkeeping between check and commit.
 */
static const struct drm_mode_config_funcs
nv50_disp_func = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = nouveau_fbcon_output_poll_changed,
	.atomic_check = nv50_disp_atomic_check,
	.atomic_commit = nv50_disp_atomic_commit,
	.atomic_state_alloc = nv50_disp_atomic_state_alloc,
	.atomic_state_clear = nv50_disp_atomic_state_clear,
	.atomic_state_free = nv50_disp_atomic_state_free,
};
|
|
|
|
|
2011-07-04 13:25:18 +07:00
|
|
|
/******************************************************************************
|
|
|
|
* Init
|
|
|
|
*****************************************************************************/
|
2014-08-10 01:10:19 +07:00
|
|
|
|
2019-02-12 19:28:13 +07:00
|
|
|
static void
|
2019-02-12 19:28:13 +07:00
|
|
|
nv50_display_fini(struct drm_device *dev, bool suspend)
|
2011-07-04 13:25:18 +07:00
|
|
|
{
|
2016-11-04 14:20:36 +07:00
|
|
|
struct nouveau_encoder *nv_encoder;
|
|
|
|
struct drm_encoder *encoder;
|
2016-11-04 14:20:36 +07:00
|
|
|
struct drm_plane *plane;
|
|
|
|
|
|
|
|
drm_for_each_plane(plane, dev) {
|
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
|
|
|
if (plane->funcs != &nv50_wndw)
|
|
|
|
continue;
|
|
|
|
nv50_wndw_fini(wndw);
|
|
|
|
}
|
2016-11-04 14:20:36 +07:00
|
|
|
|
|
|
|
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
|
|
|
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
|
|
|
|
nv_encoder = nouveau_encoder(encoder);
|
|
|
|
nv50_mstm_fini(nv_encoder->dp.mstm);
|
|
|
|
}
|
|
|
|
}
|
2011-07-04 13:25:18 +07:00
|
|
|
}
|
|
|
|
|
2019-02-12 19:28:13 +07:00
|
|
|
static int
|
2019-02-12 19:28:13 +07:00
|
|
|
nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
|
2011-07-04 13:25:18 +07:00
|
|
|
{
|
2018-05-08 17:39:47 +07:00
|
|
|
struct nv50_core *core = nv50_disp(dev)->core;
|
2016-11-04 14:20:36 +07:00
|
|
|
struct drm_encoder *encoder;
|
2016-11-04 14:20:36 +07:00
|
|
|
struct drm_plane *plane;
|
2013-03-02 10:21:31 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
core->func->init(core);
|
2016-11-04 14:20:36 +07:00
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
|
|
|
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
|
2017-05-19 20:59:35 +07:00
|
|
|
struct nouveau_encoder *nv_encoder =
|
|
|
|
nouveau_encoder(encoder);
|
2016-11-04 14:20:36 +07:00
|
|
|
nv50_mstm_init(nv_encoder->dp.mstm);
|
2016-11-04 14:20:36 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
drm_for_each_plane(plane, dev) {
|
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
|
|
|
if (plane->funcs != &nv50_wndw)
|
|
|
|
continue;
|
|
|
|
nv50_wndw_init(wndw);
|
|
|
|
}
|
|
|
|
|
2013-03-02 10:21:31 +07:00
|
|
|
return 0;
|
2011-07-04 13:25:18 +07:00
|
|
|
}
|
|
|
|
|
2019-02-12 19:28:13 +07:00
|
|
|
static void
|
2012-11-21 11:40:21 +07:00
|
|
|
nv50_display_destroy(struct drm_device *dev)
|
2011-07-04 13:25:18 +07:00
|
|
|
{
|
2012-11-21 11:40:21 +07:00
|
|
|
struct nv50_disp *disp = nv50_disp(dev);
|
2011-11-11 22:30:24 +07:00
|
|
|
|
2018-05-08 17:39:47 +07:00
|
|
|
nv50_core_del(&disp->core);
|
2011-07-04 13:25:18 +07:00
|
|
|
|
2011-11-16 12:48:48 +07:00
|
|
|
nouveau_bo_unmap(disp->sync);
|
2012-11-26 05:04:23 +07:00
|
|
|
if (disp->sync)
|
|
|
|
nouveau_bo_unpin(disp->sync);
|
2011-11-16 12:48:48 +07:00
|
|
|
nouveau_bo_ref(NULL, &disp->sync);
|
2011-07-05 07:33:08 +07:00
|
|
|
|
2012-07-31 13:16:21 +07:00
|
|
|
nouveau_display(dev)->priv = NULL;
|
2011-07-04 13:25:18 +07:00
|
|
|
kfree(disp);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2012-11-21 11:40:21 +07:00
|
|
|
nv50_display_create(struct drm_device *dev)
|
2011-07-04 13:25:18 +07:00
|
|
|
{
|
2016-05-18 10:57:42 +07:00
|
|
|
struct nvif_device *device = &nouveau_drm(dev)->client.device;
|
2012-07-31 13:16:21 +07:00
|
|
|
struct nouveau_drm *drm = nouveau_drm(dev);
|
|
|
|
struct dcb_table *dcb = &drm->vbios.dcb;
|
2011-07-05 10:08:40 +07:00
|
|
|
struct drm_connector *connector, *tmp;
|
2012-11-21 11:40:21 +07:00
|
|
|
struct nv50_disp *disp;
|
2012-07-11 07:44:20 +07:00
|
|
|
struct dcb_output *dcbe;
|
2012-03-04 13:25:59 +07:00
|
|
|
int crtcs, ret, i;
|
2011-07-04 13:25:18 +07:00
|
|
|
|
|
|
|
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
|
|
|
|
if (!disp)
|
|
|
|
return -ENOMEM;
|
2012-07-31 13:16:21 +07:00
|
|
|
|
2016-11-04 14:20:36 +07:00
|
|
|
mutex_init(&disp->mutex);
|
|
|
|
|
2012-07-31 13:16:21 +07:00
|
|
|
nouveau_display(dev)->priv = disp;
|
2012-11-21 11:40:21 +07:00
|
|
|
nouveau_display(dev)->dtor = nv50_display_destroy;
|
|
|
|
nouveau_display(dev)->init = nv50_display_init;
|
|
|
|
nouveau_display(dev)->fini = nv50_display_fini;
|
2014-08-10 01:10:22 +07:00
|
|
|
disp->disp = &nouveau_display(dev)->disp;
|
2016-11-04 14:20:36 +07:00
|
|
|
dev->mode_config.funcs = &nv50_disp_func;
|
2018-09-05 13:04:40 +07:00
|
|
|
dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
|
2011-07-04 13:25:18 +07:00
|
|
|
|
2012-10-16 11:18:32 +07:00
|
|
|
/* small shared memory area we use for notifiers and semaphores */
|
2016-05-24 14:26:48 +07:00
|
|
|
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
|
2014-01-09 17:03:15 +07:00
|
|
|
0, 0x0000, NULL, NULL, &disp->sync);
|
2012-10-16 11:18:32 +07:00
|
|
|
if (!ret) {
|
2014-11-10 09:35:06 +07:00
|
|
|
ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
|
2012-11-26 05:04:23 +07:00
|
|
|
if (!ret) {
|
2012-10-16 11:18:32 +07:00
|
|
|
ret = nouveau_bo_map(disp->sync);
|
2012-11-26 05:04:23 +07:00
|
|
|
if (ret)
|
|
|
|
nouveau_bo_unpin(disp->sync);
|
|
|
|
}
|
2012-10-16 11:18:32 +07:00
|
|
|
if (ret)
|
|
|
|
nouveau_bo_ref(NULL, &disp->sync);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* allocate master evo channel */
|
2018-05-08 17:39:47 +07:00
|
|
|
ret = nv50_core_new(drm, &disp->core);
|
2012-10-16 11:18:32 +07:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
2011-07-05 13:48:06 +07:00
|
|
|
/* create crtc objects to represent the hw heads */
|
2018-05-08 17:39:48 +07:00
|
|
|
if (disp->disp->object.oclass >= GV100_DISP)
|
|
|
|
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
|
|
|
|
else
|
2018-05-08 17:39:47 +07:00
|
|
|
if (disp->disp->object.oclass >= GF110_DISP)
|
2017-07-04 00:06:26 +07:00
|
|
|
crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
|
2012-11-16 08:44:14 +07:00
|
|
|
else
|
2017-07-04 00:06:26 +07:00
|
|
|
crtcs = 0x3;
|
2012-11-16 08:44:14 +07:00
|
|
|
|
2017-07-04 00:06:26 +07:00
|
|
|
for (i = 0; i < fls(crtcs); i++) {
|
|
|
|
if (!(crtcs & (1 << i)))
|
|
|
|
continue;
|
2016-11-04 14:20:36 +07:00
|
|
|
ret = nv50_head_create(dev, i);
|
2011-07-05 13:48:06 +07:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2011-07-05 10:08:40 +07:00
|
|
|
/* create encoder/connector objects based on VBIOS DCB table */
|
|
|
|
for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
|
2018-07-13 00:13:52 +07:00
|
|
|
connector = nouveau_connector_create(dev, dcbe);
|
2011-07-05 10:08:40 +07:00
|
|
|
if (IS_ERR(connector))
|
|
|
|
continue;
|
|
|
|
|
2013-02-11 06:52:58 +07:00
|
|
|
if (dcbe->location == DCB_LOC_ON_CHIP) {
|
|
|
|
switch (dcbe->type) {
|
|
|
|
case DCB_OUTPUT_TMDS:
|
|
|
|
case DCB_OUTPUT_LVDS:
|
|
|
|
case DCB_OUTPUT_DP:
|
|
|
|
ret = nv50_sor_create(connector, dcbe);
|
|
|
|
break;
|
|
|
|
case DCB_OUTPUT_ANALOG:
|
|
|
|
ret = nv50_dac_create(connector, dcbe);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ret = -ENODEV;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ret = nv50_pior_create(connector, dcbe);
|
2011-07-05 10:08:40 +07:00
|
|
|
}
|
|
|
|
|
2013-02-11 06:52:58 +07:00
|
|
|
if (ret) {
|
|
|
|
NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
|
|
|
|
dcbe->location, dcbe->type,
|
|
|
|
ffs(dcbe->or) - 1, ret);
|
2013-03-05 19:26:06 +07:00
|
|
|
ret = 0;
|
2011-07-05 10:08:40 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* cull any connectors we created that don't have an encoder */
|
|
|
|
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
|
|
|
|
if (connector->encoder_ids[0])
|
|
|
|
continue;
|
|
|
|
|
2012-07-31 13:16:21 +07:00
|
|
|
NV_WARN(drm, "%s has no encoders, removing\n",
|
2014-06-03 18:56:18 +07:00
|
|
|
connector->name);
|
2011-07-05 10:08:40 +07:00
|
|
|
connector->funcs->destroy(connector);
|
|
|
|
}
|
|
|
|
|
drm/nouveau/kms/nv50-: Allow vblank_disable_immediate
With instantaneous high precision vblank timestamping
that updates at leading edge of vblank, the emulated
"hw vblank counter" from vblank timestamping, which
increments at leading edge of vblank, and reliable
page flip execution and completion at leading edge of
vblank, we should meet the requirements for fast/
immediate vblank irq disable/enable.
This is only allowed on nv50+ gpu's, ie. the ones with
atomic modesetting. One requirement for immediate vblank
disable is that high precision vblank timestamping works
reliably all the time on all connectors. This is not the
case on all pre-nv50 parts for analog VGA outputs, where we
currently don't always have support for scanout position
queries and therefore fall back to vblank interrupt
timestamping. The implementation in nv04_head_state() does
not return valid values for vblanks, vtotal, hblanks, htotal
for VGA outputs on all cards, but those are needed for scanout
position queries.
Testing on Linux-4.12-rc5 + drm-next on a GeForce 9500 GT
(NV G96) with timing measurement equipment indicates this
works fine, so allow immediate vblank disable for power
saving.
For debugging in case of unexpected trouble, booting
with kernel cmdline option drm.vblankoffdelay=0
(or echo 0 > /sys/module/drm/parameters/vblankoffdelay)
would keep vblank irqs permanently on to approximate old
behavior.
Signed-off-by: Mario Kleiner <mario.kleiner.de@gmail.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
2018-07-16 13:47:50 +07:00
|
|
|
/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
|
|
|
|
dev->vblank_disable_immediate = true;
|
|
|
|
|
2011-07-04 13:25:18 +07:00
|
|
|
out:
|
|
|
|
if (ret)
|
2012-11-21 11:40:21 +07:00
|
|
|
nv50_display_destroy(dev);
|
2011-07-04 13:25:18 +07:00
|
|
|
return ret;
|
|
|
|
}
|