Merge branch 'linux-5.8' of git://github.com/skeggsb/linux into drm-next
- HD audio fixes on recent systems
- vGPU detection (fail probe if we're on one, for now)
- Interlaced mode fixes (mostly avoidance on Turing, which doesn't support it)
- SVM improvements/fixes
- NVIDIA format modifier support
- Misc other fixes.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Ben Skeggs <skeggsb@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CACAvsv6DcRFMDVEftdL7LxNtxuSQQ=qnfqdHXO0K=BmJ8Q2-+g@mail.gmail.com
commit 918b73dcfc
drivers/gpu/drm/nouveau/Kbuild
@@ -1,8 +1,10 @@
+NOUVEAU_PATH ?= $(srctree)
+
 # SPDX-License-Identifier: MIT
-ccflags-y += -I $(srctree)/$(src)/include
-ccflags-y += -I $(srctree)/$(src)/include/nvkm
-ccflags-y += -I $(srctree)/$(src)/nvkm
-ccflags-y += -I $(srctree)/$(src)
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include/nvkm
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/nvkm
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)

 # NVKM - HW resource manager
 #- code also used by various userspace tools/tests
drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -605,15 +605,16 @@ static int
 nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
 {
     struct nv04_display *disp = nv04_display(crtc->dev);
-    struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
+    struct drm_framebuffer *fb = crtc->primary->fb;
+    struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
     struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
     int ret;

-    ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, false);
+    ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
     if (ret == 0) {
         if (disp->image[nv_crtc->index])
             nouveau_bo_unpin(disp->image[nv_crtc->index]);
-        nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+        nouveau_bo_ref(nvbo, &disp->image[nv_crtc->index]);
     }

     return ret;
@@ -822,8 +823,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
     struct drm_device *dev = crtc->dev;
     struct nouveau_drm *drm = nouveau_drm(dev);
     struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+    struct nouveau_bo *nvbo;
     struct drm_framebuffer *drm_fb;
-    struct nouveau_framebuffer *fb;
     int arb_burst, arb_lwm;

     NV_DEBUG(drm, "index %d\n", nv_crtc->index);
@@ -839,13 +840,12 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
      */
     if (atomic) {
         drm_fb = passed_fb;
-        fb = nouveau_framebuffer(passed_fb);
     } else {
         drm_fb = crtc->primary->fb;
-        fb = nouveau_framebuffer(crtc->primary->fb);
     }

-    nv_crtc->fb.offset = fb->nvbo->bo.offset;
+    nvbo = nouveau_gem_object(drm_fb->obj[0]);
+    nv_crtc->fb.offset = nvbo->bo.offset;

     if (nv_crtc->lut.depth != drm_fb->format->depth) {
         nv_crtc->lut.depth = drm_fb->format->depth;
@@ -1143,8 +1143,9 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
     const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
     struct drm_device *dev = crtc->dev;
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
-    struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+    struct drm_framebuffer *old_fb = crtc->primary->fb;
+    struct nouveau_bo *old_bo = nouveau_gem_object(old_fb->obj[0]);
+    struct nouveau_bo *new_bo = nouveau_gem_object(fb->obj[0]);
     struct nv04_page_flip_state *s;
     struct nouveau_channel *chan;
     struct nouveau_cli *cli;
drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -30,6 +30,7 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_bo.h"
+#include "nouveau_gem.h"

 #include <nvif/if0004.h>

@@ -52,13 +53,13 @@ nv04_display_fini(struct drm_device *dev, bool suspend)

     /* Un-pin FB and cursors so they'll be evicted to system memory. */
     list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-        struct nouveau_framebuffer *nouveau_fb;
+        struct drm_framebuffer *fb = crtc->primary->fb;
+        struct nouveau_bo *nvbo;

-        nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
-        if (!nouveau_fb || !nouveau_fb->nvbo)
+        if (!fb || !fb->obj[0])
             continue;

-        nouveau_bo_unpin(nouveau_fb->nvbo);
+        nvbo = nouveau_gem_object(fb->obj[0]);
+        nouveau_bo_unpin(nvbo);
     }

     list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -104,13 +105,13 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)

     /* Re-pin FB/cursors. */
     list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-        struct nouveau_framebuffer *nouveau_fb;
+        struct drm_framebuffer *fb = crtc->primary->fb;
+        struct nouveau_bo *nvbo;

-        nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
-        if (!nouveau_fb || !nouveau_fb->nvbo)
+        if (!fb || !fb->obj[0])
             continue;

-        ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
+        nvbo = nouveau_gem_object(fb->obj[0]);
+        ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
         if (ret)
             NV_ERROR(drm, "Could not pin framebuffer\n");
     }
drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -31,6 +31,7 @@
 #include "nouveau_bo.h"
 #include "nouveau_connector.h"
 #include "nouveau_display.h"
+#include "nouveau_gem.h"
 #include "nvreg.h"
 #include "disp.h"

@@ -120,9 +121,9 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
     struct nvif_object *dev = &drm->client.device.object;
     struct nouveau_plane *nv_plane =
         container_of(plane, struct nouveau_plane, base);
-    struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
     struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
     struct nouveau_bo *cur = nv_plane->cur;
+    struct nouveau_bo *nvbo;
     bool flip = nv_plane->flip;
     int soff = NV_PCRTC0_SIZE * nv_crtc->index;
     int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
@@ -140,17 +141,18 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
     if (ret)
         return ret;

-    ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
+    nvbo = nouveau_gem_object(fb->obj[0]);
+    ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
     if (ret)
         return ret;

-    nv_plane->cur = nv_fb->nvbo;
+    nv_plane->cur = nvbo;

     nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
     nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);

     nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0);
-    nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+    nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nvbo->bo.offset);
     nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
     nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
     nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
@@ -172,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
     if (format & NV_PVIDEO_FORMAT_PLANAR) {
         nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
         nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
-              nv_fb->nvbo->bo.offset + fb->offsets[1]);
+              nvbo->bo.offset + fb->offsets[1]);
     }
     nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format | fb->pitches[0]);
     nvif_wr32(dev, NV_PVIDEO_STOP, 0);
@@ -368,8 +370,8 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
     struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
     struct nouveau_plane *nv_plane =
         container_of(plane, struct nouveau_plane, base);
-    struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
     struct nouveau_bo *cur = nv_plane->cur;
+    struct nouveau_bo *nvbo;
     uint32_t overlay = 1;
     int brightness = (nv_plane->brightness - 512) * 62 / 512;
     int ret, i;
@@ -384,11 +386,12 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
     if (ret)
         return ret;

-    ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
+    nvbo = nouveau_gem_object(fb->obj[0]);
+    ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
     if (ret)
         return ret;

-    nv_plane->cur = nv_fb->nvbo;
+    nv_plane->cur = nvbo;

     nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
     nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
@@ -396,7 +399,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,

     for (i = 0; i < 2; i++) {
         nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
-              nv_fb->nvbo->bo.offset);
+              nvbo->bo.offset);
         nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i,
               fb->pitches[0]);
         nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -263,7 +263,8 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
     struct nv50_disp_base_channel_dma_v0 args = {
         .head = head,
     };
-    struct nv50_disp *disp = nv50_disp(drm->dev);
+    struct nouveau_display *disp = nouveau_display(drm->dev);
+    struct nv50_disp *disp50 = nv50_disp(drm->dev);
     struct nv50_wndw *wndw;
     int ret;

@@ -273,9 +274,9 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
     if (*pwndw = wndw, ret)
         return ret;

-    ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+    ret = nv50_dmac_create(&drm->client.device, &disp->disp.object,
                    &oclass, head, &args, sizeof(args),
-                   disp->sync->bo.offset, &wndw->wndw);
+                   disp50->sync->bo.offset, &wndw->wndw);
     if (ret) {
         NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
         return ret;
drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -2,6 +2,7 @@
 #define __NV50_KMS_CORE_H__
 #include "disp.h"
 #include "atom.h"
+#include <nouveau_encoder.h>

 struct nv50_core {
     const struct nv50_core_func *func;
@@ -15,6 +16,7 @@ void nv50_core_del(struct nv50_core **);
 struct nv50_core_func {
     void (*init)(struct nv50_core *);
     void (*ntfy_init)(struct nouveau_bo *, u32 offset);
+    int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
     int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
                   struct nvif_device *);
     void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
@@ -27,6 +29,9 @@ struct nv50_core_func {
     const struct nv50_outp_func {
         void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
                  struct nv50_head_atom *);
+        /* XXX: Only used by SORs and PIORs for now */
+        void (*get_caps)(struct nv50_disp *,
+                 struct nouveau_encoder *, int or);
     } *dac, *pior, *sor;
 };

@@ -35,6 +40,7 @@ int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
           struct nv50_core **);
 void core507d_init(struct nv50_core *);
 void core507d_ntfy_init(struct nouveau_bo *, u32);
+int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
 int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
 void core507d_update(struct nv50_core *, u32 *, bool);

@@ -51,6 +57,7 @@ extern const struct nv50_outp_func sor907d;
 int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);

 int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int corec37d_caps_init(struct nouveau_drm *, struct nv50_disp *);
 int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
 void corec37d_update(struct nv50_core *, u32 *, bool);
 void corec37d_wndw_owner(struct nv50_core *);
drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -62,6 +62,20 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
     nouveau_bo_wr32(bo, offset / 4, 0x00000000);
 }

+int
+core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+    u32 *push = evo_wait(&disp->core->chan, 2);
+
+    if (push) {
+        evo_mthd(push, 0x008c, 1);
+        evo_data(push, 0x0);
+        evo_kick(push, &disp->core->chan);
+    }
+
+    return 0;
+}
+
 void
 core507d_init(struct nv50_core *core)
 {
@@ -77,6 +91,7 @@ static const struct nv50_core_func
 core507d = {
     .init = core507d_init,
     .ntfy_init = core507d_ntfy_init,
+    .caps_init = core507d_caps_init,
     .ntfy_wait_done = core507d_ntfy_wait_done,
     .update = core507d_update,
     .head = &head507d,
drivers/gpu/drm/nouveau/dispnv50/core827d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
 core827d = {
     .init = core507d_init,
     .ntfy_init = core507d_ntfy_init,
+    .caps_init = core507d_caps_init,
     .ntfy_wait_done = core507d_ntfy_wait_done,
     .update = core507d_update,
     .head = &head827d,
drivers/gpu/drm/nouveau/dispnv50/core907d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
 core907d = {
     .init = core507d_init,
     .ntfy_init = core507d_ntfy_init,
+    .caps_init = core507d_caps_init,
     .ntfy_wait_done = core507d_ntfy_wait_done,
     .update = core507d_update,
     .head = &head907d,
drivers/gpu/drm/nouveau/dispnv50/core917d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
 core917d = {
     .init = core507d_init,
     .ntfy_init = core507d_ntfy_init,
+    .caps_init = core507d_caps_init,
     .ntfy_wait_done = core507d_ntfy_wait_done,
     .update = core507d_update,
     .head = &head917d,
drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -22,6 +22,7 @@
 #include "core.h"
 #include "head.h"

+#include <nvif/class.h>
 #include <nouveau_bo.h>

 #include <nvif/timer.h>
@@ -87,6 +88,30 @@ corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
     nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
 }

+int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+    int ret;
+
+    ret = nvif_object_init(&disp->disp->object, 0, GV100_DISP_CAPS,
+                   NULL, 0, &disp->caps);
+    if (ret) {
+        NV_ERROR(drm,
+             "Failed to init notifier caps region: %d\n",
+             ret);
+        return ret;
+    }
+
+    ret = nvif_object_map(&disp->caps, NULL, 0);
+    if (ret) {
+        NV_ERROR(drm,
+             "Failed to map notifier caps region: %d\n",
+             ret);
+        return ret;
+    }
+
+    return 0;
+}
+
 static void
 corec37d_init(struct nv50_core *core)
 {
@@ -111,6 +136,7 @@ static const struct nv50_core_func
 corec37d = {
     .init = corec37d_init,
     .ntfy_init = corec37d_ntfy_init,
+    .caps_init = corec37d_caps_init,
     .ntfy_wait_done = corec37d_ntfy_wait_done,
     .update = corec37d_update,
     .wndw.owner = corec37d_wndw_owner,
drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -46,6 +46,7 @@ static const struct nv50_core_func
 corec57d = {
     .init = corec57d_init,
     .ntfy_init = corec37d_ntfy_init,
+    .caps_init = corec37d_caps_init,
     .ntfy_wait_done = corec37d_ntfy_wait_done,
     .update = corec37d_update,
     .wndw.owner = corec37d_wndw_owner,
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -32,7 +32,7 @@
 bool
 curs507a_space(struct nv50_wndw *wndw)
 {
-    nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 2,
+    nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 100,
         if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
             return true;
     );
drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -482,15 +482,16 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
  * audio component binding for ELD notification
  */
 static void
-nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port)
+nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
+                int dev_id)
 {
     if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
         acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
-                         port, -1);
+                         port, dev_id);
 }

 static int
-nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
+nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
                  bool *enabled, unsigned char *buf, int max_bytes)
 {
     struct drm_device *drm_dev = dev_get_drvdata(kdev);
@@ -506,7 +507,8 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
         nv_encoder = nouveau_encoder(encoder);
         nv_connector = nouveau_encoder_connector_get(nv_encoder);
         nv_crtc = nouveau_crtc(encoder->crtc);
-        if (!nv_connector || !nv_crtc || nv_crtc->index != port)
+        if (!nv_connector || !nv_crtc || nv_encoder->or != port ||
+            nv_crtc->index != dev_id)
             continue;
         *enabled = drm_detect_monitor_audio(nv_connector->edid);
         if (*enabled) {
@@ -600,7 +602,8 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)

     nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));

-    nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
+    nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
+                    nv_crtc->index);
 }

 static void
@@ -634,7 +637,8 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
     nvif_mthd(&disp->disp->object, 0, &args,
           sizeof(args.base) + drm_eld_size(args.data));

-    nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
+    nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
+                    nv_crtc->index);
 }

 /******************************************************************************
@@ -904,15 +908,9 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
     if (!state->duplicated) {
         const int clock = crtc_state->adjusted_mode.clock;

-        /*
-         * XXX: Since we don't use HDR in userspace quite yet, limit
-         * the bpc to 8 to save bandwidth on the topology. In the
-         * future, we'll want to properly fix this by dynamically
-         * selecting the highest possible bpc that would fit in the
-         * topology
-         */
-        asyh->or.bpc = min(connector->display_info.bpc, 8U);
-        asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
+        asyh->or.bpc = connector->display_info.bpc;
+        asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
+                            false);
     }

     slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
@@ -1058,7 +1056,14 @@ static enum drm_mode_status
 nv50_mstc_mode_valid(struct drm_connector *connector,
              struct drm_display_mode *mode)
 {
-    return MODE_OK;
+    struct nv50_mstc *mstc = nv50_mstc(connector);
+    struct nouveau_encoder *outp = mstc->mstm->outp;
+
+    /* TODO: calculate the PBN from the dotclock and validate against the
+     * MSTB's max possible PBN
+     */
+
+    return nv50_dp_mode_valid(connector, outp, mode, NULL);
 }

 static int
@@ -1072,8 +1077,17 @@ nv50_mstc_get_modes(struct drm_connector *connector)
     if (mstc->edid)
         ret = drm_add_edid_modes(&mstc->connector, mstc->edid);

-    if (!mstc->connector.display_info.bpc)
-        mstc->connector.display_info.bpc = 8;
+    /*
+     * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
+     * to 8 to save bandwidth on the topology. In the future, we'll want
+     * to properly fix this by dynamically selecting the highest possible
+     * bpc that would fit in the topology
+     */
+    if (connector->display_info.bpc)
+        connector->display_info.bpc =
+            clamp(connector->display_info.bpc, 6U, 8U);
+    else
+        connector->display_info.bpc = 8;

     if (mstc->native)
         drm_mode_destroy(mstc->connector.dev, mstc->native);
@@ -1123,8 +1137,10 @@ nv50_mstc_detect(struct drm_connector *connector,
         return connector_status_disconnected;

     ret = pm_runtime_get_sync(connector->dev->dev);
-    if (ret < 0 && ret != -EACCES)
+    if (ret < 0 && ret != -EACCES) {
+        pm_runtime_put_autosuspend(connector->dev->dev);
         return connector_status_disconnected;
+    }

     ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
                      mstc->port);
@@ -1659,6 +1675,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
     struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
     struct nouveau_encoder *nv_encoder;
     struct drm_encoder *encoder;
+    struct nv50_disp *disp = nv50_disp(connector->dev);
     int type, ret;

     switch (dcbe->type) {
@@ -1685,10 +1702,12 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)

     drm_connector_attach_encoder(connector, encoder);

+    disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+
     if (dcbe->type == DCB_OUTPUT_DP) {
-        struct nv50_disp *disp = nv50_disp(encoder->dev);
         struct nvkm_i2c_aux *aux =
             nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+
         if (aux) {
             if (disp->disp->object.oclass < GF110_DISP) {
                 /* HW has no support for address-only
@@ -1801,7 +1820,9 @@ nv50_pior_func = {
 static int
 nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
-    struct nouveau_drm *drm = nouveau_drm(connector->dev);
+    struct drm_device *dev = connector->dev;
+    struct nouveau_drm *drm = nouveau_drm(dev);
+    struct nv50_disp *disp = nv50_disp(dev);
     struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
     struct nvkm_i2c_bus *bus = NULL;
     struct nvkm_i2c_aux *aux = NULL;
@@ -1840,6 +1861,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
     drm_encoder_helper_add(encoder, &nv50_pior_help);

     drm_connector_attach_encoder(connector, encoder);
+
+    disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+
     return 0;
 }

@@ -2369,7 +2393,8 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
     struct drm_encoder *encoder;
     struct drm_plane *plane;

-    core->func->init(core);
+    if (resume || runtime)
+        core->func->init(core);

     list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
         if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
@@ -2396,6 +2421,8 @@ nv50_display_destroy(struct drm_device *dev)

     nv50_audio_component_fini(nouveau_drm(dev));

+    nvif_object_unmap(&disp->caps);
+    nvif_object_fini(&disp->caps);
     nv50_core_del(&disp->core);

     nouveau_bo_unmap(disp->sync);
@@ -2456,6 +2483,22 @@ nv50_display_create(struct drm_device *dev)
     if (ret)
         goto out;

+    disp->core->func->init(disp->core);
+    if (disp->core->func->caps_init) {
+        ret = disp->core->func->caps_init(drm, disp);
+        if (ret)
+            goto out;
+    }
+
+    /* Assign the correct format modifiers */
+    if (disp->disp->object.oclass >= TU102_DISP)
+        nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
+    else
+    if (disp->disp->object.oclass >= GF110_DISP)
+        nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
+    else
+        nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+
     /* create crtc objects to represent the hw heads */
     if (disp->disp->object.oclass >= GV100_DISP)
         crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
@@ -2551,3 +2594,53 @@ nv50_display_create(struct drm_device *dev)
     nv50_display_destroy(dev);
     return ret;
 }
+
+/******************************************************************************
+ * Format modifiers
+ *****************************************************************************/
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 disp50xx_modifiers[] = { /* | | | | | */
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
+    DRM_FORMAT_MOD_LINEAR,
+    DRM_FORMAT_MOD_INVALID
+};
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 disp90xx_modifiers[] = { /* | | | | | */
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
+    DRM_FORMAT_MOD_LINEAR,
+    DRM_FORMAT_MOD_INVALID
+};
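Note (editor's sketch, not part of the commit): the modifier tables above vary only two fields, the page kind and log2(block height in GOBs). A minimal standalone C sketch of where those fields sit inside a DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D value, mirroring nouveau_decode_mod() introduced later in this series; the example modifier value is constructed here for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Field layout per drm_fourcc.h: bits 3:0 hold log2(block height in GOBs),
 * bits 19:12 hold the page kind, bit 4 flags 2D block-linear, and bits
 * 63:56 hold the DRM vendor code (0x03 for NVIDIA). */
static void decode_nv_block_linear(uint64_t modifier)
{
    uint32_t log2_bh = (uint32_t)(modifier & 0xF);
    uint8_t kind = (uint8_t)((modifier >> 12) & 0xFF);

    printf("kind=0x%02x, block height=%u GOB(s)\n", kind, 1u << log2_bh);
}

int main(void)
{
    /* Hypothetical value matching the 0x7a-kind, 2^0-GOB entry above */
    uint64_t mod = (3ULL << 56) | 0x10 | (0x7aULL << 12) | 0;
    decode_nv_block_linear(mod); /* prints: kind=0x7a, block height=1 GOB(s) */
    return 0;
}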
drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -9,6 +9,7 @@ struct nv50_msto;
 struct nv50_disp {
     struct nvif_disp *disp;
     struct nv50_core *core;
+    struct nvif_object caps;

 #define NV50_DISP_SYNC(c, o) ((c) * 0x040 + (o))
 #define NV50_DISP_CORE_NTFY NV50_DISP_SYNC(0, 0x00)
@@ -78,6 +79,10 @@ void nv50_dmac_destroy(struct nv50_dmac *);
 u32 *evo_wait(struct nv50_dmac *, int nr);
 void evo_kick(u32 *, struct nv50_dmac *);

+extern const u64 disp50xx_modifiers[];
+extern const u64 disp90xx_modifiers[];
+extern const u64 wndwc57e_modifiers[];
+
 #define evo_mthd(p, m, s) do {						\
     const u32 _m = (m), _s = (s);					\
     if (drm_debug_enabled(DRM_UT_KMS))				\
drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -168,14 +168,15 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
     struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
     struct nv50_head_mode *m = &asyh->mode;
     u32 *push;
-    if ((push = evo_wait(core, 12))) {
+    if ((push = evo_wait(core, 13))) {
         evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
         evo_data(push, (m->v.active << 16) | m->h.active );
         evo_data(push, (m->v.synce << 16) | m->h.synce );
         evo_data(push, (m->v.blanke << 16) | m->h.blanke );
         evo_data(push, (m->v.blanks << 16) | m->h.blanks );
         evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
-        evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+        evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
+        evo_data(push, m->interlace);
         evo_data(push, m->clock * 1000);
         evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
         evo_data(push, m->clock * 1000);
drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -173,14 +173,15 @@ headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
     struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
     struct nv50_head_mode *m = &asyh->mode;
     u32 *push;
-    if ((push = evo_wait(core, 12))) {
+    if ((push = evo_wait(core, 13))) {
         evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
         evo_data(push, (m->v.active << 16) | m->h.active );
         evo_data(push, (m->v.synce << 16) | m->h.synce );
         evo_data(push, (m->v.blanke << 16) | m->h.blanke );
         evo_data(push, (m->v.blanks << 16) | m->h.blanks );
         evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
-        evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+        evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
+        evo_data(push, m->interlace);
         evo_data(push, m->clock * 1000);
         evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
         evo_data(push, m->clock * 1000);
drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -38,7 +38,15 @@ pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
     }
 }

+static void
+pior507d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp,
+          int or)
+{
+    outp->caps.dp_interlace = true;
+}
+
 const struct nv50_outp_func
 pior507d = {
     .ctrl = pior507d_ctrl,
+    .get_caps = pior507d_get_caps,
 };
drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -38,7 +38,14 @@ sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
     }
 }

+static void
+sor507d_get_caps(struct nv50_disp *core, struct nouveau_encoder *outp, int or)
+{
+    outp->caps.dp_interlace = true;
+}
+
 const struct nv50_outp_func
 sor507d = {
     .ctrl = sor507d_ctrl,
+    .get_caps = sor507d_get_caps,
 };
drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -21,6 +21,7 @@
  */
 #include "core.h"

+#include <nouveau_bo.h>
 #include <nvif/class.h>

 static void
@@ -35,7 +36,17 @@ sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
     }
 }

+static void
+sor907d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+    const int off = or * 2;
+    u32 tmp = nouveau_bo_rd32(disp->sync, 0x000014 + off);
+
+    outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
 const struct nv50_outp_func
 sor907d = {
     .ctrl = sor907d_ctrl,
+    .get_caps = sor907d_get_caps,
 };
drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -33,7 +33,16 @@ sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
     }
 }

+static void
+sorc37d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+    u32 tmp = nvif_rd32(&disp->caps, 0x000144 + (or * 8));
+
+    outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
 const struct nv50_outp_func
 sorc37d = {
     .ctrl = sorc37d_ctrl,
+    .get_caps = sorc37d_get_caps,
 };
drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -29,6 +29,7 @@
 #include <drm/drm_fourcc.h>

 #include "nouveau_bo.h"
+#include "nouveau_gem.h"

 static void
 nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
@@ -39,12 +40,13 @@ nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
 }

 static struct nv50_wndw_ctxdma *
-nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
 {
-    struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+    struct nouveau_drm *drm = nouveau_drm(fb->dev);
     struct nv50_wndw_ctxdma *ctxdma;
-    const u8 kind = fb->nvbo->kind;
-    const u32 handle = 0xfb000000 | kind;
+    u32 handle;
+    u32 unused;
+    u8 kind;
     struct {
         struct nv_dma_v0 base;
         union {
@@ -56,6 +58,9 @@ nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
     u32 argc = sizeof(args.base);
     int ret;

+    nouveau_framebuffer_get_layout(fb, &unused, &kind);
+    handle = 0xfb000000 | kind;
+
     list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
         if (ctxdma->object.handle == handle)
             return ctxdma;
@@ -234,16 +239,20 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
                    struct nv50_wndw_atom *asyw,
                    struct nv50_head_atom *asyh)
 {
-    struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+    struct drm_framebuffer *fb = asyw->state.fb;
     struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+    uint8_t kind;
+    uint32_t tile_mode;
     int ret;

     NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

-    if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
-        asyw->image.w = fb->base.width;
-        asyw->image.h = fb->base.height;
-        asyw->image.kind = fb->nvbo->kind;
+    if (fb != armw->state.fb || !armw->visible || modeset) {
+        nouveau_framebuffer_get_layout(fb, &tile_mode, &kind);
+
+        asyw->image.w = fb->width;
+        asyw->image.h = fb->height;
+        asyw->image.kind = kind;

         ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
         if (ret) {
@@ -255,16 +264,16 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
         if (asyw->image.kind) {
             asyw->image.layout = 0;
             if (drm->client.device.info.chipset >= 0xc0)
-                asyw->image.blockh = fb->nvbo->mode >> 4;
+                asyw->image.blockh = tile_mode >> 4;
             else
-                asyw->image.blockh = fb->nvbo->mode;
-            asyw->image.blocks[0] = fb->base.pitches[0] / 64;
+                asyw->image.blockh = tile_mode;
+            asyw->image.blocks[0] = fb->pitches[0] / 64;
             asyw->image.pitch[0] = 0;
         } else {
             asyw->image.layout = 1;
             asyw->image.blockh = 0;
             asyw->image.blocks[0] = 0;
-            asyw->image.pitch[0] = fb->base.pitches[0];
+            asyw->image.pitch[0] = fb->pitches[0];
         }

         if (!asyh->state.async_flip)
@@ -471,47 +480,50 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
 static void
 nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
 {
-    struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
     struct nouveau_drm *drm = nouveau_drm(plane->dev);
+    struct nouveau_bo *nvbo;

     NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
     if (!old_state->fb)
         return;

-    nouveau_bo_unpin(fb->nvbo);
+    nvbo = nouveau_gem_object(old_state->fb->obj[0]);
+    nouveau_bo_unpin(nvbo);
 }

 static int
 nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 {
-    struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+    struct drm_framebuffer *fb = state->fb;
     struct nouveau_drm *drm = nouveau_drm(plane->dev);
     struct nv50_wndw *wndw = nv50_wndw(plane);
     struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+    struct nouveau_bo *nvbo;
     struct nv50_head_atom *asyh;
     struct nv50_wndw_ctxdma *ctxdma;
     int ret;

-    NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+    NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
     if (!asyw->state.fb)
         return 0;

-    ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
+    nvbo = nouveau_gem_object(fb->obj[0]);
+    ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
     if (ret)
         return ret;

     if (wndw->ctxdma.parent) {
         ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
         if (IS_ERR(ctxdma)) {
-            nouveau_bo_unpin(fb->nvbo);
+            nouveau_bo_unpin(nvbo);
             return PTR_ERR(ctxdma);
         }

         asyw->image.handle[0] = ctxdma->object.handle;
     }

-    asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
-    asyw->image.offset[0] = fb->nvbo->bo.offset;
+    asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+    asyw->image.offset[0] = nvbo->bo.offset;

     if (wndw->func->prepare) {
         asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
@@ -603,6 +615,29 @@ nv50_wndw_destroy(struct drm_plane *plane)
     kfree(wndw);
 }

+/* This function assumes the format has already been validated against the plane
+ * and the modifier was validated against the device-wides modifier list at FB
+ * creation time.
+ */
+static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
+                        u32 format, u64 modifier)
+{
+    struct nouveau_drm *drm = nouveau_drm(plane->dev);
+    uint8_t i;
+
+    if (drm->client.device.info.chipset < 0xc0) {
+        const struct drm_format_info *info = drm_format_info(format);
+        const uint8_t kind = (modifier >> 12) & 0xff;
+
+        if (!format) return false;
+
+        for (i = 0; i < info->num_planes; i++)
+            if ((info->cpp[i] != 4) && kind != 0x70) return false;
+    }
+
+    return true;
+}
+
 const struct drm_plane_funcs
 nv50_wndw = {
     .update_plane = drm_atomic_helper_update_plane,
@@ -611,6 +646,7 @@ nv50_wndw = {
     .reset = nv50_wndw_reset,
     .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
     .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+    .format_mod_supported = nv50_plane_format_mod_supported,
 };

 static int
@@ -658,7 +694,8 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
     for (nformat = 0; format[nformat]; nformat++);

     ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
-                       format, nformat, NULL,
+                       format, nformat,
+                       nouveau_display(dev)->format_modifiers,
                        type, "%s-%d", name, index);
     if (ret) {
         kfree(*pwndw);
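Note (editor's sketch, not part of the commit): nv50_plane_format_mod_supported() above encodes a simple rule for pre-GF100 hardware (chipset < 0xc0): page kinds other than 0x70 are only usable with 4-byte-per-pixel planes. A standalone C restatement of that check, with the kind taken from bits 19:12 of the modifier as in the function above; the helper name is hypothetical.

#include <stdbool.h>
#include <stdint.h>

/* cpp[] is bytes-per-pixel for each plane of the format, as carried by
 * struct drm_format_info; kind comes from bits 19:12 of the modifier. */
static bool pre_gf100_mod_ok(const uint8_t *cpp, int num_planes, uint8_t kind)
{
    for (int i = 0; i < num_planes; i++) {
        /* only 32bpp planes may use kinds other than 0x70 */
        if (cpp[i] != 4 && kind != 0x70)
            return false;
    }
    return true;
}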
drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -173,6 +173,23 @@ wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
     return true;
 }

+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 wndwc57e_modifiers[] = { /* | | | | | */
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
+    DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5),
+    DRM_FORMAT_MOD_LINEAR,
+    DRM_FORMAT_MOD_INVALID
+};
+
 static const struct nv50_wndw_func
 wndwc57e = {
     .acquire = wndwc37e_acquire,
drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -89,6 +89,8 @@
 #define GV100_DISP /* cl5070.h */ 0x0000c370
 #define TU102_DISP /* cl5070.h */ 0x0000c570

+#define GV100_DISP_CAPS 0x0000c373
+
 #define NV31_MPEG 0x00003174
 #define G82_MPEG 0x00008274
drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -24,6 +24,8 @@ struct nvkm_subdev_func {
 };

 extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
+int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *,
+             int index, struct nvkm_subdev **);
 void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
               int index, struct nvkm_subdev *);
 void nvkm_subdev_del(struct nvkm_subdev **);
drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -49,7 +49,6 @@ static struct nouveau_dsm_priv {
     bool optimus_flags_detected;
     bool optimus_skip_dsm;
     acpi_handle dhandle;
-    acpi_handle rom_handle;
 } nouveau_dsm_priv;

 bool nouveau_is_optimus(void) {
@@ -212,37 +211,6 @@ static const struct vga_switcheroo_handler nouveau_dsm_handler = {
     .get_client_id = nouveau_dsm_get_client_id,
 };

-/*
- * Firmware supporting Windows 8 or later do not use _DSM to put the device into
- * D3cold, they instead rely on disabling power resources on the parent.
- */
-static bool nouveau_pr3_present(struct pci_dev *pdev)
-{
-    struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
-    struct acpi_device *parent_adev;
-
-    if (!parent_pdev)
-        return false;
-
-    if (!parent_pdev->bridge_d3) {
-        /*
-         * Parent PCI bridge is currently not power managed.
-         * Since userspace can change these afterwards to be on
-         * the safe side we stick with _DSM and prevent usage of
-         * _PR3 from the bridge.
-         */
-        pci_d3cold_disable(pdev);
-        return false;
-    }
-
-    parent_adev = ACPI_COMPANION(&parent_pdev->dev);
-    if (!parent_adev)
-        return false;
-
-    return parent_adev->power.flags.power_resources &&
-        acpi_has_method(parent_adev->handle, "_PR3");
-}
-
 static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
                   bool *has_mux, bool *has_opt,
                   bool *has_opt_flags, bool *has_pr3)
@@ -250,6 +218,16 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
     acpi_handle dhandle;
     bool supports_mux;
     int optimus_funcs;
+    struct pci_dev *parent_pdev;
+
+    *has_pr3 = false;
+    parent_pdev = pci_upstream_bridge(pdev);
+    if (parent_pdev) {
+        if (parent_pdev->bridge_d3)
+            *has_pr3 = pci_pr3_present(parent_pdev);
+        else
+            pci_d3cold_disable(pdev);
+    }

     dhandle = ACPI_HANDLE(&pdev->dev);
     if (!dhandle)
@@ -270,7 +248,6 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
     *has_mux = supports_mux;
     *has_opt = !!optimus_funcs;
     *has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS);
-    *has_pr3 = false;

     if (optimus_funcs) {
         uint32_t result;
@@ -280,8 +257,6 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
             (result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
             (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
             (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
-
-        *has_pr3 = nouveau_pr3_present(pdev);
     }
 }

@@ -385,59 +360,6 @@ void nouveau_unregister_dsm_handler(void) {}
 void nouveau_switcheroo_optimus_dsm(void) {}
 #endif

-/* retrieve the ROM in 4k blocks */
-static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
-                int offset, int len)
-{
-    acpi_status status;
-    union acpi_object rom_arg_elements[2], *obj;
-    struct acpi_object_list rom_arg;
-    struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-
-    rom_arg.count = 2;
-    rom_arg.pointer = &rom_arg_elements[0];
-
-    rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
-    rom_arg_elements[0].integer.value = offset;
-
-    rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
-    rom_arg_elements[1].integer.value = len;
-
-    status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
-    if (ACPI_FAILURE(status)) {
-        pr_info("failed to evaluate ROM got %s\n",
-            acpi_format_exception(status));
-        return -ENODEV;
-    }
-    obj = (union acpi_object *)buffer.pointer;
-    len = min(len, (int)obj->buffer.length);
-    memcpy(bios+offset, obj->buffer.pointer, len);
-    kfree(buffer.pointer);
-    return len;
-}
-
-bool nouveau_acpi_rom_supported(struct device *dev)
-{
-    acpi_status status;
-    acpi_handle dhandle, rom_handle;
-
-    dhandle = ACPI_HANDLE(dev);
-    if (!dhandle)
-        return false;
-
-    status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
-    if (ACPI_FAILURE(status))
-        return false;
-
-    nouveau_dsm_priv.rom_handle = rom_handle;
-    return true;
-}
-
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
-    return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
-}
-
 void *
 nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
 {
drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -10,8 +10,6 @@ bool nouveau_is_v1_dsm(void);
 void nouveau_register_dsm_handler(void);
 void nouveau_unregister_dsm_handler(void);
 void nouveau_switcheroo_optimus_dsm(void);
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct device *);
 void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
 #else
 static inline bool nouveau_is_optimus(void) { return false; };
@@ -19,8 +17,6 @@ static inline bool nouveau_is_v1_dsm(void) { return false; };
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
 static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
-static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
 static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
 #endif
drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -38,6 +38,7 @@
 #include "nouveau_reg.h"
 #include "nouveau_drv.h"
 #include "dispnv04/hw.h"
+#include "dispnv50/disp.h"
 #include "nouveau_acpi.h"

 #include "nouveau_display.h"
@@ -509,7 +510,11 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
     nv_connector->detected_encoder = nv_encoder;

     if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-        connector->interlace_allowed = true;
+        if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+            connector->interlace_allowed =
+                nv_encoder->caps.dp_interlace;
+        else
+            connector->interlace_allowed = true;
         connector->doublescan_allowed = true;
     } else
     if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
@@ -1029,6 +1034,29 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
     return 112000 * duallink_scale;
 }

+enum drm_mode_status
+nouveau_conn_mode_clock_valid(const struct drm_display_mode *mode,
+                  const unsigned min_clock,
+                  const unsigned max_clock,
+                  unsigned int *clock_out)
+{
+    unsigned int clock = mode->clock;
+
+    if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
+        DRM_MODE_FLAG_3D_FRAME_PACKING)
+        clock *= 2;
+
+    if (clock < min_clock)
+        return MODE_CLOCK_LOW;
+    if (clock > max_clock)
+        return MODE_CLOCK_HIGH;
+
+    if (clock_out)
+        *clock_out = clock;
+
+    return MODE_OK;
+}
+
 static enum drm_mode_status
 nouveau_connector_mode_valid(struct drm_connector *connector,
                  struct drm_display_mode *mode)
@@ -1037,7 +1065,6 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
     struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
     struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
     unsigned min_clock = 25000, max_clock = min_clock;
-    unsigned clock = mode->clock;

     switch (nv_encoder->dcb->type) {
     case DCB_OUTPUT_LVDS:
@@ -1060,25 +1087,14 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
     case DCB_OUTPUT_TV:
         return get_slave_funcs(encoder)->mode_valid(encoder, mode);
     case DCB_OUTPUT_DP:
-        max_clock = nv_encoder->dp.link_nr;
-        max_clock *= nv_encoder->dp.link_bw;
-        clock = clock * (connector->display_info.bpc * 3) / 10;
-        break;
+        return nv50_dp_mode_valid(connector, nv_encoder, mode, NULL);
     default:
         BUG();
         return MODE_BAD;
     }

-    if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
-        clock *= 2;
-
-    if (clock < min_clock)
-        return MODE_CLOCK_LOW;
-
-    if (clock > max_clock)
-        return MODE_CLOCK_HIGH;
-
-    return MODE_OK;
+    return nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+                         NULL);
 }

 static struct drm_encoder *
drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -195,6 +195,11 @@ int nouveau_conn_atomic_get_property(struct drm_connector *,
                      const struct drm_connector_state *,
                      struct drm_property *, u64 *);
 struct drm_display_mode *nouveau_conn_native_mode(struct drm_connector *);
+enum drm_mode_status
+nouveau_conn_mode_clock_valid(const struct drm_display_mode *,
+                  const unsigned min_clock,
+                  const unsigned max_clock,
+                  unsigned *clock);

 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
 extern int nouveau_backlight_init(struct drm_connector *);
drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -181,8 +181,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
     }

     ret = pm_runtime_get_sync(drm->dev);
-    if (ret < 0 && ret != -EACCES)
+    if (ret < 0 && ret != -EACCES) {
+        pm_runtime_put_autosuspend(drm->dev);
         return ret;
+    }

     ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
     pm_runtime_put_autosuspend(drm->dev);
     if (ret < 0)
@ -31,6 +31,7 @@
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/drm_gem_framebuffer_helper.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
@ -179,41 +180,164 @@ nouveau_display_vblank_init(struct drm_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
|
||||
.destroy = drm_gem_fb_destroy,
|
||||
.create_handle = drm_gem_fb_create_handle,
|
||||
};
|
||||
|
||||
static void
|
||||
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
|
||||
nouveau_decode_mod(struct nouveau_drm *drm,
|
||||
uint64_t modifier,
|
||||
uint32_t *tile_mode,
|
||||
uint8_t *kind)
|
||||
{
|
||||
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
|
||||
BUG_ON(!tile_mode || !kind);
|
||||
|
||||
if (fb->nvbo)
|
||||
drm_gem_object_put_unlocked(&fb->nvbo->bo.base);
|
||||
if (modifier == DRM_FORMAT_MOD_LINEAR) {
|
||||
/* tile_mode will not be used in this case */
|
||||
*tile_mode = 0;
|
||||
*kind = 0;
|
||||
} else {
|
||||
/*
|
||||
* Extract the block height and kind from the corresponding
|
||||
* modifier fields. See drm_fourcc.h for details.
|
||||
*/
|
||||
*tile_mode = (uint32_t)(modifier & 0xF);
|
||||
*kind = (uint8_t)((modifier >> 12) & 0xFF);
|
||||
|
||||
drm_framebuffer_cleanup(drm_fb);
|
||||
kfree(fb);
|
||||
if (drm->client.device.info.chipset >= 0xc0)
|
||||
*tile_mode <<= 4;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
|
||||
uint32_t *tile_mode,
|
||||
uint8_t *kind)
|
||||
{
|
||||
if (fb->flags & DRM_MODE_FB_MODIFIERS) {
|
||||
struct nouveau_drm *drm = nouveau_drm(fb->dev);
|
||||
|
||||
nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
|
||||
} else {
|
||||
const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
|
||||
|
||||
*tile_mode = nvbo->mode;
|
||||
*kind = nvbo->kind;
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
|
||||
struct drm_file *file_priv,
|
||||
unsigned int *handle)
|
||||
nouveau_validate_decode_mod(struct nouveau_drm *drm,
|
||||
uint64_t modifier,
|
||||
uint32_t *tile_mode,
|
||||
uint8_t *kind)
|
||||
{
|
||||
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
|
||||
struct nouveau_display *disp = nouveau_display(drm->dev);
|
||||
int mod;
|
||||
|
||||
return drm_gem_handle_create(file_priv, &fb->nvbo->bo.base, handle);
|
||||
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
BUG_ON(!disp->format_modifiers);
|
||||
|
||||
for (mod = 0;
|
||||
(disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
|
||||
(disp->format_modifiers[mod] != modifier);
|
||||
mod++);
|
||||
|
||||
if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
|
||||
return -EINVAL;
|
||||
|
||||
nouveau_decode_mod(drm, modifier, tile_mode, kind);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
|
||||
.destroy = nouveau_user_framebuffer_destroy,
|
||||
.create_handle = nouveau_user_framebuffer_create_handle,
|
||||
};
|
||||
static inline uint32_t
|
||||
nouveau_get_width_in_blocks(uint32_t stride)
|
||||
{
|
||||
/* GOBs per block in the x direction is always one, and GOBs are
|
||||
* 64 bytes wide
|
||||
*/
|
||||
static const uint32_t log_block_width = 6;
|
||||
|
||||
return (stride + (1 << log_block_width) - 1) >> log_block_width;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
nouveau_get_height_in_blocks(struct nouveau_drm *drm,
|
||||
uint32_t height,
|
||||
uint32_t log_block_height_in_gobs)
|
||||
{
|
||||
uint32_t log_gob_height;
|
||||
uint32_t log_block_height;
|
||||
|
||||
BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
|
||||
|
||||
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
|
||||
log_gob_height = 2;
|
||||
else
|
||||
log_gob_height = 3;
|
||||
|
||||
log_block_height = log_block_height_in_gobs + log_gob_height;
|
||||
|
||||
return (height + (1 << log_block_height) - 1) >> log_block_height;
|
||||
}
|
||||
|
||||
static int
|
||||
nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
|
||||
uint32_t offset, uint32_t stride, uint32_t h,
|
||||
uint32_t tile_mode)
|
||||
{
|
||||
uint32_t gob_size, bw, bh;
|
||||
uint64_t bl_size;
|
||||
|
||||
BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
|
||||
|
||||
if (drm->client.device.info.chipset >= 0xc0) {
|
||||
if (tile_mode & 0xF)
|
||||
return -EINVAL;
|
||||
tile_mode >>= 4;
|
||||
}
|
||||
|
||||
if (tile_mode & 0xFFFFFFF0)
|
||||
return -EINVAL;
|
||||
|
||||
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
|
||||
gob_size = 256;
|
||||
else
|
||||
gob_size = 512;
|
||||
|
||||
bw = nouveau_get_width_in_blocks(stride);
|
||||
bh = nouveau_get_height_in_blocks(drm, h, tile_mode);
|
||||
|
||||
bl_size = bw * bh * (1 << tile_mode) * gob_size;
|
||||
|
||||
DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
|
||||
offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
|
||||
nvbo->bo.mem.size);
|
||||
|
||||
if (bl_size + offset > nvbo->bo.mem.size)
|
||||
return -ERANGE;
|
||||
|
||||
return 0;
|
||||
}

int
nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct nouveau_bo *nvbo,
struct nouveau_framebuffer **pfb)
struct drm_gem_object *gem,
struct drm_framebuffer **pfb)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_framebuffer *fb;
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct drm_framebuffer *fb;
const struct drm_format_info *info;
unsigned int width, height, i;
uint32_t tile_mode;
uint8_t kind;
int ret;

/* YUV overlays have special requirements pre-NV50 */
@@ -236,13 +360,50 @@ nouveau_framebuffer_new(struct drm_device *dev,
return -EINVAL;
}

if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
&tile_mode, &kind)) {
DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
mode_cmd->modifier[0]);
return -EINVAL;
}
} else {
tile_mode = nvbo->mode;
kind = nvbo->kind;
}

info = drm_get_format_info(dev, mode_cmd);

for (i = 0; i < info->num_planes; i++) {
width = drm_format_info_plane_width(info,
mode_cmd->width,
i);
height = drm_format_info_plane_height(info,
mode_cmd->height,
i);

if (kind) {
ret = nouveau_check_bl_size(drm, nvbo,
mode_cmd->offsets[i],
mode_cmd->pitches[i],
height, tile_mode);
if (ret)
return ret;
} else {
uint32_t size = mode_cmd->pitches[i] * height;

if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
return -ERANGE;
}
}

if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;

drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
fb->nvbo = nvbo;
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
fb->obj[0] = gem;

ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret)
kfree(fb);
return ret;
@@ -253,19 +414,17 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct nouveau_framebuffer *fb;
struct nouveau_bo *nvbo;
struct drm_framebuffer *fb;
struct drm_gem_object *gem;
int ret;

gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
nvbo = nouveau_gem_object(gem);

ret = nouveau_framebuffer_new(dev, mode_cmd, nvbo, &fb);
ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
if (ret == 0)
return &fb->base;
return fb;

drm_gem_object_put_unlocked(gem);
return ERR_PTR(ret);
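
With struct nouveau_framebuffer gone, callers that used fb->nvbo recover the buffer object through the GEM object stored in fb->obj[0] instead, as the hunks above and below do. A one-line sketch of the new access pattern (the wrapper name is illustrative; nouveau_gem_object() is the driver's existing gem-to-bo cast):

static struct nouveau_bo *
example_fb_to_bo(struct drm_framebuffer *fb)
{
	/* obj[0] now carries the GEM object the fb was created from */
	return nouveau_gem_object(fb->obj[0]);
}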

@@ -517,6 +676,7 @@ nouveau_display_create(struct drm_device *dev)

dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
dev->mode_config.allow_fb_modifiers = true;

if (drm->client.device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;

@@ -8,26 +8,11 @@

#include <drm/drm_framebuffer.h>

struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
struct nouveau_vma *vma;
u32 r_handle;
u32 r_format;
u32 r_pitch;
struct nvif_object h_base[4];
struct nvif_object h_core;
};

static inline struct nouveau_framebuffer *
nouveau_framebuffer(struct drm_framebuffer *fb)
{
return container_of(fb, struct nouveau_framebuffer, base);
}

int nouveau_framebuffer_new(struct drm_device *,
const struct drm_mode_fb_cmd2 *,
struct nouveau_bo *, struct nouveau_framebuffer **);
int
nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb);

struct nouveau_display {
void *priv;
@@ -47,6 +32,8 @@ struct nouveau_display {
struct drm_property *color_vibrance_property;

struct drm_atomic_state *suspend;

const u64 *format_modifiers;
};

static inline struct nouveau_display *
@@ -75,6 +62,10 @@ int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,

void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);

void
nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, uint32_t *tile_mode,
uint8_t *kind);

struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
const struct drm_mode_fb_cmd2 *);

@@ -25,12 +25,14 @@
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
#include <nvif/if000c.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>
@@ -54,66 +56,69 @@ enum nouveau_aper {
typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper, u64 dst_addr,
enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
enum nouveau_aper, u64 dst_addr);

struct nouveau_dmem_chunk {
struct list_head list;
struct nouveau_bo *bo;
struct nouveau_drm *drm;
unsigned long pfn_first;
unsigned long callocated;
unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
spinlock_t lock;
struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
nouveau_migrate_copy_t copy_func;
nouveau_clear_page_t clear_func;
struct nouveau_channel *chan;
};

struct nouveau_dmem {
struct nouveau_drm *drm;
struct dev_pagemap pagemap;
struct nouveau_dmem_migrate migrate;
struct list_head chunk_free;
struct list_head chunk_full;
struct list_head chunk_empty;
struct list_head chunks;
struct mutex mutex;
struct page *free_pages;
spinlock_t lock;
};

static inline struct nouveau_dmem *page_to_dmem(struct page *page)
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
return container_of(page->pgmap, struct nouveau_dmem, pagemap);
return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

return chunk->drm;
}

static unsigned long nouveau_dmem_page_addr(struct page *page)
{
struct nouveau_dmem_chunk *chunk = page->zone_device_data;
unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
chunk->pagemap.res.start;

return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
return chunk->bo->bo.offset + off;
}
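
The rewritten nouveau_dmem_page_addr() no longer needs pfn_first: a page's offset inside its chunk is its physical address minus the start of the chunk's pagemap resource, and that offset lands directly in the chunk's VRAM buffer. A small worked sketch of the arithmetic, with illustrative values:

#include <assert.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 12	/* assume 4KiB pages */

int main(void)
{
	uint64_t res_start = 0x100000000ull;  /* chunk->pagemap.res.start */
	uint64_t bo_offset = 0x002000000ull;  /* chunk->bo->bo.offset     */
	uint64_t pfn = (res_start >> EX_PAGE_SHIFT) + 5;  /* 6th page */

	uint64_t off = (pfn << EX_PAGE_SHIFT) - res_start;
	assert(bo_offset + off == bo_offset + (5ull << EX_PAGE_SHIFT));
	return 0;
}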

static void nouveau_dmem_page_free(struct page *page)
{
struct nouveau_dmem_chunk *chunk = page->zone_device_data;
unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
struct nouveau_dmem *dmem = chunk->drm->dmem;

spin_lock(&dmem->lock);
page->zone_device_data = dmem->free_pages;
dmem->free_pages = page;

/*
* FIXME:
*
* This is really a bad example, we need to overhaul nouveau memory
* management to be more page focus and allow lighter locking scheme
* to be use in the process.
*/
spin_lock(&chunk->lock);
clear_bit(idx, chunk->bitmap);
WARN_ON(!chunk->callocated);
chunk->callocated--;
/*
* FIXME when chunk->callocated reach 0 we should add the chunk to
* a reclaim list so that it can be freed in case of memory pressure.
*/
spin_unlock(&chunk->lock);
spin_unlock(&dmem->lock);
}

static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
@@ -165,8 +170,8 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,

static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
struct nouveau_drm *drm = dmem->drm;
struct nouveau_drm *drm = page_to_drm(vmf->page);
struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
unsigned long src = 0, dst = 0;
dma_addr_t dma_addr = 0;
@@ -209,131 +214,105 @@ static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
};

static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
struct nouveau_dmem_chunk *chunk;
struct resource *res;
struct page *page;
void *ptr;
unsigned long i, pfn_first;
int ret;

if (drm->dmem == NULL)
return -EINVAL;

mutex_lock(&drm->dmem->mutex);
chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
struct nouveau_dmem_chunk,
list);
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (chunk == NULL) {
mutex_unlock(&drm->dmem->mutex);
return -ENOMEM;
ret = -ENOMEM;
goto out;
}

list_del(&chunk->list);
mutex_unlock(&drm->dmem->mutex);
/* Allocate unused physical address space for device private pages. */
res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
"nouveau_dmem");
if (IS_ERR(res)) {
ret = PTR_ERR(res);
goto out_free;
}

chunk->drm = drm;
chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
chunk->pagemap.res = *res;
chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
chunk->pagemap.owner = drm->dev;

ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
&chunk->bo);
if (ret)
goto out;
goto out_release;

ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
if (ret) {
nouveau_bo_ref(NULL, &chunk->bo);
goto out;
if (ret)
goto out_bo_free;

ptr = memremap_pages(&chunk->pagemap, numa_node_id());
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
goto out_bo_unpin;
}

bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
spin_lock_init(&chunk->lock);

out:
mutex_lock(&drm->dmem->mutex);
if (chunk->bo)
list_add(&chunk->list, &drm->dmem->chunk_empty);
else
list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
list_add(&chunk->list, &drm->dmem->chunks);
mutex_unlock(&drm->dmem->mutex);

return ret;
}

static struct nouveau_dmem_chunk *
nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
{
struct nouveau_dmem_chunk *chunk;

chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
struct nouveau_dmem_chunk,
list);
if (chunk)
return chunk;

chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
struct nouveau_dmem_chunk,
list);
if (chunk->bo)
return chunk;

return NULL;
}

static int
nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
unsigned long npages,
unsigned long *pages)
{
struct nouveau_dmem_chunk *chunk;
unsigned long c;
int ret;

memset(pages, 0xff, npages * sizeof(*pages));

mutex_lock(&drm->dmem->mutex);
for (c = 0; c < npages;) {
unsigned long i;

chunk = nouveau_dmem_chunk_first_free_locked(drm);
if (chunk == NULL) {
mutex_unlock(&drm->dmem->mutex);
ret = nouveau_dmem_chunk_alloc(drm);
if (ret) {
if (c)
return 0;
return ret;
}
mutex_lock(&drm->dmem->mutex);
continue;
}

spin_lock(&chunk->lock);
i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
while (i < DMEM_CHUNK_NPAGES && c < npages) {
pages[c] = chunk->pfn_first + i;
set_bit(i, chunk->bitmap);
chunk->callocated++;
c++;

i = find_next_zero_bit(chunk->bitmap,
DMEM_CHUNK_NPAGES, i);
}
spin_unlock(&chunk->lock);
pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
page = pfn_to_page(pfn_first);
spin_lock(&drm->dmem->lock);
for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
page->zone_device_data = drm->dmem->free_pages;
drm->dmem->free_pages = page;
}
mutex_unlock(&drm->dmem->mutex);
*ppage = page;
chunk->callocated++;
spin_unlock(&drm->dmem->lock);

NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
DMEM_CHUNK_SIZE >> 20);

return 0;

out_bo_unpin:
nouveau_bo_unpin(chunk->bo);
out_bo_free:
nouveau_bo_ref(NULL, &chunk->bo);
out_release:
release_mem_region(chunk->pagemap.res.start,
resource_size(&chunk->pagemap.res));
out_free:
kfree(chunk);
out:
return ret;
}
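
Two patterns are worth noting in the rewritten allocator above: errors unwind through labels in the reverse order the resources were acquired, and a new chunk's pages are cached on a free list threaded through page->zone_device_data. A self-contained sketch of that intrusive free list, with stand-in types (the driver does the same with struct page and drm->dmem->free_pages under dmem->lock):

#include <stddef.h>

struct ex_page { void *zone_device_data; };

static struct ex_page *ex_free_pages;	/* stands in for drm->dmem->free_pages */

static void ex_push_free(struct ex_page *page)
{
	/* nouveau_dmem_page_free() pushes the same way under dmem->lock */
	page->zone_device_data = ex_free_pages;
	ex_free_pages = page;
}

static struct ex_page *ex_pop_free(void)
{
	/* nouveau_dmem_page_alloc_locked() pops from the head */
	struct ex_page *page = ex_free_pages;

	if (page)
		ex_free_pages = page->zone_device_data;
	return page;
}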

static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
unsigned long pfns[1];
struct page *page;
struct nouveau_dmem_chunk *chunk;
struct page *page = NULL;
int ret;

/* FIXME stop all the miss-match API ... */
ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
if (ret)
return NULL;
spin_lock(&drm->dmem->lock);
if (drm->dmem->free_pages) {
page = drm->dmem->free_pages;
drm->dmem->free_pages = page->zone_device_data;
chunk = nouveau_page_to_chunk(page);
chunk->callocated++;
spin_unlock(&drm->dmem->lock);
} else {
spin_unlock(&drm->dmem->lock);
ret = nouveau_dmem_chunk_alloc(drm, &page);
if (ret)
return NULL;
}

page = pfn_to_page(pfns[0]);
get_page(page);
lock_page(page);
return page;
@@ -356,12 +335,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
return;

mutex_lock(&drm->dmem->mutex);
list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
/* FIXME handle pin failure */
WARN_ON(ret);
}
list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
list_for_each_entry(chunk, &drm->dmem->chunks, list) {
ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
/* FIXME handle pin failure */
WARN_ON(ret);
@@ -378,12 +352,8 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
return;

mutex_lock(&drm->dmem->mutex);
list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
list_for_each_entry(chunk, &drm->dmem->chunks, list)
nouveau_bo_unpin(chunk->bo);
}
list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
nouveau_bo_unpin(chunk->bo);
}
mutex_unlock(&drm->dmem->mutex);
}

@@ -397,15 +367,13 @@ nouveau_dmem_fini(struct nouveau_drm *drm)

mutex_lock(&drm->dmem->mutex);

WARN_ON(!list_empty(&drm->dmem->chunk_free));
WARN_ON(!list_empty(&drm->dmem->chunk_full));

list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
if (chunk->bo) {
nouveau_bo_unpin(chunk->bo);
nouveau_bo_ref(NULL, &chunk->bo);
}
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
nouveau_bo_unpin(chunk->bo);
nouveau_bo_ref(NULL, &chunk->bo);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
release_mem_region(chunk->pagemap.res.start,
resource_size(&chunk->pagemap.res));
kfree(chunk);
}

@@ -471,6 +439,52 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
return 0;
}

static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
enum nouveau_aper dst_aper, u64 dst_addr)
{
struct nouveau_channel *chan = drm->dmem->migrate.chan;
u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
(1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
(1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
(1 << 2) /* FLUSH_ENABLE_TRUE. */ |
(2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
u32 remap = (4 << 0) /* DST_X_CONST_A */ |
(5 << 4) /* DST_Y_CONST_B */ |
(3 << 16) /* COMPONENT_SIZE_FOUR */ |
(1 << 24) /* NUM_DST_COMPONENTS_TWO */;
int ret;

ret = RING_SPACE(chan, 12);
if (ret)
return ret;

switch (dst_aper) {
case NOUVEAU_APER_VRAM:
BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
break;
case NOUVEAU_APER_HOST:
BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
break;
default:
return -EINVAL;
}
launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */

BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
OUT_RING(chan, 0);
OUT_RING(chan, 0);
OUT_RING(chan, remap);
BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
OUT_RING(chan, upper_32_bits(dst_addr));
OUT_RING(chan, lower_32_bits(dst_addr));
BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
OUT_RING(chan, length >> 3);
BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
OUT_RING(chan, launch_dma);
return 0;
}
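
The clear path above enables the copy engine's remap unit and sources the destination from two 32-bit constants, both written as zero via method 0x0700; with four-byte components and two components per element the engine emits 8 zero bytes per element, which is why the element count is programmed as "length >> 3". A sketch decoding the two packed words, using only the bit positions from the inline comments above:

#include <stdint.h>
#include <stdio.h>

static uint32_t ex_launch_dma(void)
{
	return (1u << 10)   /* REMAP_ENABLE_TRUE */
	     | (1u << 8)    /* DST_MEMORY_LAYOUT_PITCH */
	     | (1u << 7)    /* SRC_MEMORY_LAYOUT_PITCH */
	     | (1u << 2)    /* FLUSH_ENABLE_TRUE */
	     | (2u << 0);   /* DATA_TRANSFER_TYPE_NON_PIPELINED */
}

static uint32_t ex_remap(void)
{
	return (4u << 0)    /* DST_X sourced from CONST_A */
	     | (5u << 4)    /* DST_Y sourced from CONST_B */
	     | (3u << 16)   /* COMPONENT_SIZE_FOUR */
	     | (1u << 24);  /* NUM_DST_COMPONENTS_TWO */
}

int main(void)
{
	printf("launch_dma=0x%08x remap=0x%08x\n",
	       ex_launch_dma(), ex_remap());
	return 0;
}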

static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
@@ -480,6 +494,7 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
case VOLTA_DMA_COPY_A:
case TURING_DMA_COPY_A:
drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
drm->dmem->migrate.chan = drm->ttm.chan;
return 0;
default:
@@ -491,9 +506,6 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
struct device *device = drm->dev->dev;
struct resource *res;
unsigned long i, size, pfn_first;
int ret;

/* This only make sense on PASCAL or newer */
@@ -505,84 +517,53 @@ nouveau_dmem_init(struct nouveau_drm *drm)

drm->dmem->drm = drm;
mutex_init(&drm->dmem->mutex);
INIT_LIST_HEAD(&drm->dmem->chunk_free);
INIT_LIST_HEAD(&drm->dmem->chunk_full);
INIT_LIST_HEAD(&drm->dmem->chunk_empty);

size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);
INIT_LIST_HEAD(&drm->dmem->chunks);
mutex_init(&drm->dmem->mutex);
spin_lock_init(&drm->dmem->lock);

/* Initialize migration dma helpers before registering memory */
ret = nouveau_dmem_migrate_init(drm);
if (ret)
goto out_free;

/*
* FIXME we need some kind of policy to decide how much VRAM we
* want to register with HMM. For now just register everything
* and latter if we want to do thing like over commit then we
* could revisit this.
*/
res = devm_request_free_mem_region(device, &iomem_resource, size);
if (IS_ERR(res))
goto out_free;
drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
drm->dmem->pagemap.res = *res;
drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
drm->dmem->pagemap.owner = drm->dev;
if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
goto out_free;

pfn_first = res->start >> PAGE_SHIFT;
for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
struct nouveau_dmem_chunk *chunk;
struct page *page;
unsigned long j;

chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (chunk == NULL) {
nouveau_dmem_fini(drm);
return;
}

chunk->drm = drm;
chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
list_add_tail(&chunk->list, &drm->dmem->chunk_empty);

page = pfn_to_page(chunk->pfn_first);
for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
page->zone_device_data = chunk;
if (ret) {
kfree(drm->dmem);
drm->dmem = NULL;
}

NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
return;
out_free:
kfree(drm->dmem);
drm->dmem = NULL;
}

static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
unsigned long src, dma_addr_t *dma_addr)
unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
{
struct device *dev = drm->dev->dev;
struct page *dpage, *spage;
unsigned long paddr;

spage = migrate_pfn_to_page(src);
if (!spage || !(src & MIGRATE_PFN_MIGRATE))
if (!(src & MIGRATE_PFN_MIGRATE))
goto out;

dpage = nouveau_dmem_page_alloc_locked(drm);
if (!dpage)
return 0;
goto out;

*dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
goto out_free_page;

if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
*dma_addr))
goto out_dma_unmap;
paddr = nouveau_dmem_page_addr(dpage);
if (spage) {
*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
goto out_free_page;
if (drm->dmem->migrate.copy_func(drm, page_size(spage),
NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
goto out_dma_unmap;
} else {
*dma_addr = DMA_MAPPING_ERROR;
if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
NOUVEAU_APER_VRAM, paddr))
goto out_free_page;
}

*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
if (src & MIGRATE_PFN_WRITE)
*pfn |= NVIF_VMM_PFNMAP_V0_W;
return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
@@ -590,19 +571,21 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
out_free_page:
nouveau_dmem_page_free_locked(drm, dpage);
out:
*pfn = NVIF_VMM_PFNMAP_V0_NONE;
return 0;
}
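
Each successfully migrated page also hands back a *pfn value for nouveau_pfns_map(): validity, aperture, write permission and the VRAM page number packed into one u64. A sketch of the encoding, using the same NVIF_VMM_PFNMAP_V0_* flags as the code above (kernel context assumed):

static u64 ex_encode_vram_pfn(u64 paddr, bool writable)
{
	u64 pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
		  ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);

	if (writable)
		pfn |= NVIF_VMM_PFNMAP_V0_W;
	return pfn;	/* failed pages become NVIF_VMM_PFNMAP_V0_NONE */
}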

static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
struct migrate_vma *args, dma_addr_t *dma_addrs)
struct nouveau_svmm *svmm, struct migrate_vma *args,
dma_addr_t *dma_addrs, u64 *pfns)
{
struct nouveau_fence *fence;
unsigned long addr = args->start, nr_dma = 0, i;

for (i = 0; addr < args->end; i++) {
args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
dma_addrs + nr_dma);
if (args->dst[i])
dma_addrs + nr_dma, pfns + i);
if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
nr_dma++;
addr += PAGE_SIZE;
}
@@ -610,20 +593,18 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

while (nr_dma--) {
dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
DMA_BIDIRECTIONAL);
}
/*
* FIXME optimization: update GPU page table to point to newly migrated
* memory.
*/
migrate_vma_finalize(args);
}

int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
struct nouveau_svmm *svmm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
@@ -635,9 +616,13 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
.vma = vma,
.start = start,
};
unsigned long c, i;
unsigned long i;
u64 *pfns;
int ret = -ENOMEM;

if (drm->dmem == NULL)
return -ENODEV;

args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
if (!args.src)
goto out;
@@ -649,19 +634,25 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
if (!dma_addrs)
goto out_free_dst;

for (i = 0; i < npages; i += c) {
c = min(SG_MAX_SINGLE_ALLOC, npages);
args.end = start + (c << PAGE_SHIFT);
pfns = nouveau_pfns_alloc(max);
if (!pfns)
goto out_free_dma;

for (i = 0; i < npages; i += max) {
args.end = start + (max << PAGE_SHIFT);
ret = migrate_vma_setup(&args);
if (ret)
goto out_free_dma;
goto out_free_pfns;

if (args.cpages)
nouveau_dmem_migrate_chunk(drm, &args, dma_addrs);
nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
pfns);
args.start = args.end;
}

ret = 0;
out_free_pfns:
nouveau_pfns_free(pfns);
out_free_dma:
kfree(dma_addrs);
out_free_dst:

@@ -25,6 +25,7 @@
struct drm_device;
struct drm_file;
struct nouveau_drm;
struct nouveau_svmm;
struct hmm_range;

#if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM)
@@ -34,6 +35,7 @@ void nouveau_dmem_suspend(struct nouveau_drm *);
void nouveau_dmem_resume(struct nouveau_drm *);

int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
struct nouveau_svmm *svmm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end);

@@ -98,3 +98,34 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
return NOUVEAU_DP_SST;
return ret;
}

/* TODO:
* - Use the minimum possible BPC here, once we add support for the max bpc
* property.
* - Validate the mode against downstream port caps (see
* drm_dp_downstream_max_clock())
* - Validate against the DP caps advertised by the GPU (we don't check these
* yet)
*/
enum drm_mode_status
nv50_dp_mode_valid(struct drm_connector *connector,
struct nouveau_encoder *outp,
const struct drm_display_mode *mode,
unsigned *out_clock)
{
const unsigned min_clock = 25000;
unsigned max_clock, clock;
enum drm_mode_status ret;

if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
return MODE_NO_INTERLACE;

max_clock = outp->dp.link_nr * outp->dp.link_bw;
clock = mode->clock * (connector->display_info.bpc * 3) / 10;

ret = nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
&clock);
if (out_clock)
*out_clock = clock;
return ret;
}
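
The bandwidth test above converts the pixel clock into the data the link must carry: three colour components of display_info.bpc bits each, divided by 10 because 8b/10b coding moves 8 payload bits per symbol. Worked for a common mode, assuming (as nouveau appears to store it) that dp.link_bw is the per-lane symbol rate in kHz:

#include <stdio.h>

int main(void)
{
	unsigned mode_clock = 148500;           /* 1920x1080@60, kHz */
	unsigned bpc = 8;
	unsigned link_nr = 4, link_bw = 270000; /* 4 lanes at HBR */

	unsigned clock = mode_clock * (bpc * 3) / 10;  /* 356400 */
	unsigned max_clock = link_nr * link_bw;        /* 1080000 */

	printf("need %u, have %u: %s\n", clock, max_clock,
	       clock <= max_clock ? "mode fits" : "too slow");
	return 0;
}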

@@ -681,8 +681,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
{
struct nvkm_device *device;
struct drm_device *drm_dev;
struct apertures_struct *aper;
bool boot = false;
int ret;

if (vga_switcheroo_client_probe_defer(pdev))
@@ -699,32 +697,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
nvkm_device_del(&device);

/* Remove conflicting drivers (vesafb, efifb etc). */
aper = alloc_apertures(3);
if (!aper)
return -ENOMEM;

aper->ranges[0].base = pci_resource_start(pdev, 1);
aper->ranges[0].size = pci_resource_len(pdev, 1);
aper->count = 1;

if (pci_resource_len(pdev, 2)) {
aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
aper->count++;
}

if (pci_resource_len(pdev, 3)) {
aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
aper->count++;
}

#ifdef CONFIG_X86
boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
if (nouveau_modeset != 2)
drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
kfree(aper);
ret = remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
if (ret)
return ret;

ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
true, true, ~0ULL, &device);

@@ -66,6 +66,10 @@ struct nouveau_encoder {
} dp;
};

struct {
bool dp_interlace : 1;
} caps;

void (*enc_save)(struct drm_encoder *encoder);
void (*enc_restore)(struct drm_encoder *encoder);
void (*update)(struct nouveau_encoder *, u8 head,
@@ -100,6 +104,10 @@ enum nouveau_dp_status {
};

int nouveau_dp_detect(struct nouveau_encoder *);
enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
struct nouveau_encoder *,
const struct drm_display_mode *,
unsigned *clock);

struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);

@@ -312,7 +312,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct fb_info *info;
struct nouveau_framebuffer *fb;
struct drm_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
@@ -335,7 +335,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
goto out;
}

ret = nouveau_framebuffer_new(dev, &mode_cmd, nvbo, &fb);
ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
if (ret)
goto out_unref;

@@ -353,7 +353,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,

chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_vma_new(nvbo, chan->vmm, &fb->vma);
ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -367,7 +367,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
}

/* setup helper */
fbcon->helper.fb = &fb->base;
fbcon->helper.fb = fb;

if (!chan)
info->flags = FBINFO_HWACCEL_DISABLED;
@@ -376,12 +376,12 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_sw_ops;
info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
fb->nvbo->bo.mem.bus.offset;
info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
info->fix.smem_start = nvbo->bo.mem.bus.base +
nvbo->bo.mem.bus.offset;
info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT;

info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
info->screen_size = nvbo->bo.mem.num_pages << PAGE_SHIFT;

drm_fb_helper_fill_info(info, &fbcon->helper, sizes);

@@ -393,19 +393,19 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,

/* To allow resizeing without swapping buffers */
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo);
fb->width, fb->height, nvbo->bo.offset, nvbo);

vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;

out_unlock:
if (chan)
nouveau_vma_del(&fb->vma);
nouveau_bo_unmap(fb->nvbo);
nouveau_vma_del(&fbcon->vma);
nouveau_bo_unmap(nvbo);
out_unpin:
nouveau_bo_unpin(fb->nvbo);
nouveau_bo_unpin(nvbo);
out_unref:
nouveau_bo_ref(NULL, &fb->nvbo);
nouveau_bo_ref(NULL, &nvbo);
out:
return ret;
}
@@ -413,16 +413,18 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fbcon->helper.fb);
struct drm_framebuffer *fb = fbcon->helper.fb;
struct nouveau_bo *nvbo;

drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_fini(&fbcon->helper);

if (nouveau_fb && nouveau_fb->nvbo) {
nouveau_vma_del(&nouveau_fb->vma);
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
drm_framebuffer_put(&nouveau_fb->base);
if (fb && fb->obj[0]) {
nvbo = nouveau_gem_object(fb->obj[0]);
nouveau_vma_del(&fbcon->vma);
nouveau_bo_unmap(nvbo);
nouveau_bo_unpin(nvbo);
drm_framebuffer_put(fb);
}

return 0;

@@ -31,6 +31,8 @@

#include "nouveau_display.h"

struct nouveau_vma;

struct nouveau_fbdev {
struct drm_fb_helper helper; /* must be first */
unsigned int saved_flags;
@@ -41,6 +43,7 @@ struct nouveau_fbdev {
struct nvif_object gdi;
struct nvif_object blit;
struct nvif_object twod;
struct nouveau_vma *vma;

struct mutex hotplug_lock;
bool hotplug_waiting;

@@ -76,8 +76,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
return ret;

ret = pm_runtime_get_sync(dev);
if (ret < 0 && ret != -EACCES)
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev);
goto out;
}

ret = nouveau_vma_new(nvbo, vmm, &vma);
pm_runtime_mark_last_busy(dev);
@@ -157,8 +159,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!WARN_ON(ret < 0 && ret != -EACCES)) {
nouveau_gem_object_unmap(nvbo, vma);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
pm_runtime_put_autosuspend(dev);
}
}
ttm_bo_unreserve(&nvbo->bo);

@@ -95,14 +95,3 @@ struct platform_driver nouveau_platform_driver = {
.probe = nouveau_platform_probe,
.remove = nouveau_platform_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
#endif

@@ -70,6 +70,12 @@ struct nouveau_svm {
#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)

struct nouveau_pfnmap_args {
struct nvif_ioctl_v0 i;
struct nvif_ioctl_mthd_v0 m;
struct nvif_vmm_pfnmap_v0 p;
};

struct nouveau_ivmm {
struct nouveau_svmm *svmm;
u64 inst;
@@ -187,7 +193,8 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
addr = max(addr, vma->vm_start);
next = min(vma->vm_end, end);
/* This is a best effort so we ignore errors */
nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
next);
addr = next;
}

@@ -784,6 +791,56 @@ nouveau_svm_fault(struct nvif_notify *notify)
return NVIF_NOTIFY_KEEP;
}

static struct nouveau_pfnmap_args *
nouveau_pfns_to_args(void *pfns)
{
return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
}

u64 *
nouveau_pfns_alloc(unsigned long npages)
{
struct nouveau_pfnmap_args *args;

args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
if (!args)
return NULL;

args->i.type = NVIF_IOCTL_V0_MTHD;
args->m.method = NVIF_VMM_V0_PFNMAP;
args->p.page = PAGE_SHIFT;

return args->p.phys;
}

void
nouveau_pfns_free(u64 *pfns)
{
struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);

kfree(args);
}

void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
unsigned long addr, u64 *pfns, unsigned long npages)
{
struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
int ret;

args->p.addr = addr;
args->p.size = npages << PAGE_SHIFT;

mutex_lock(&svmm->mutex);

svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
npages * sizeof(args->p.phys[0]), NULL);
svmm->vmm->vmm.object.client->super = false;

mutex_unlock(&svmm->mutex);
}
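
The three exported helpers above form one allocate/fill/map/free cycle; nouveau_dmem_migrate_chunk() is the caller this series adds. A hedged usage sketch (kernel context assumed, error handling trimmed):

static void ex_pfns_cycle(struct nouveau_svmm *svmm, struct mm_struct *mm,
			  unsigned long addr, unsigned long npages)
{
	u64 *pfns = nouveau_pfns_alloc(npages);

	if (!pfns)
		return;
	/* fill pfns[0..npages-1] per page, e.g. from
	 * nouveau_dmem_migrate_copy_one() above */
	nouveau_pfns_map(svmm, mm, addr, pfns, npages);
	nouveau_pfns_free(pfns);
}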

static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{

@@ -18,6 +18,11 @@ void nouveau_svmm_fini(struct nouveau_svmm **);
int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);

u64 *nouveau_pfns_alloc(unsigned long npages);
void nouveau_pfns_free(u64 *pfns);
void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
unsigned long addr, u64 *pfns, unsigned long npages);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}

@@ -149,7 +149,6 @@ int
nv50_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
@@ -240,8 +239,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb->vma->addr));
OUT_RING(chan, lower_32_bits(fb->vma->addr));
OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -249,8 +248,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb->vma->addr));
OUT_RING(chan, lower_32_bits(fb->vma->addr));
OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
FIRE_RING(chan);

return 0;

@@ -150,7 +150,6 @@ nvc0_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
@@ -240,8 +239,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
OUT_RING (chan, upper_32_bits(fb->vma->addr));
OUT_RING (chan, lower_32_bits(fb->vma->addr));
OUT_RING (chan, upper_32_bits(nfbdev->vma->addr));
OUT_RING (chan, lower_32_bits(nfbdev->vma->addr));
BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
@@ -251,8 +250,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
OUT_RING (chan, upper_32_bits(fb->vma->addr));
OUT_RING (chan, lower_32_bits(fb->vma->addr));
OUT_RING (chan, upper_32_bits(nfbdev->vma->addr));
OUT_RING (chan, lower_32_bits(nfbdev->vma->addr));
FIRE_RING (chan);

return 0;

@@ -140,7 +140,7 @@ nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
{
struct nvkm_instmem *imem = device->imem;
struct nvkm_memory *memory;
int ret = -ENOSYS;
int ret;

if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
return -ENOSYS;

@@ -221,3 +221,14 @@ nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
__mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
subdev->debug = nvkm_dbgopt(device->dbgopt, name);
}

int
nvkm_subdev_new_(const struct nvkm_subdev_func *func,
struct nvkm_device *device, int index,
struct nvkm_subdev **psubdev)
{
if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(func, device, index, *psubdev);
return 0;
}

@@ -2924,6 +2924,20 @@ nvkm_device_del(struct nvkm_device **pdevice)
}
}

static inline bool
nvkm_device_endianness(struct nvkm_device *device)
{
u32 boot1 = nvkm_rd32(device, 0x000004) & 0x01000001;
#ifdef __BIG_ENDIAN
if (!boot1)
return false;
#else
if (boot1)
return false;
#endif
return true;
}
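
The helper reads boot1 (register 0x000004) and reports whether the MMIO byte order already matches the CPU. The constructor below uses it twice: detect, write the byte-swap enable value, read back to flush, then re-check. Condensed as a sketch (kernel context assumed; the helper and register value are taken from the hunks here):

static int ex_fix_endianness(struct nvkm_device *device)
{
	if (nvkm_device_endianness(device))
		return 0;
	nvkm_wr32(device, 0x000004, 0x01000001); /* toggle byte swap */
	nvkm_rd32(device, 0x000000);             /* flush the write */
	return nvkm_device_endianness(device) ? 0 : -ENOSYS;
}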
|
||||
|
||||
int
|
||||
nvkm_device_ctor(const struct nvkm_device_func *func,
|
||||
const struct nvkm_device_quirk *quirk,
|
||||
@ -2934,8 +2948,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
|
||||
{
|
||||
struct nvkm_subdev *subdev;
|
||||
u64 mmio_base, mmio_size;
|
||||
u32 boot0, strap;
|
||||
void __iomem *map;
|
||||
u32 boot0, boot1, strap;
|
||||
int ret = -EEXIST, i;
|
||||
unsigned chipset;
|
||||
|
||||
@ -2961,26 +2974,30 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
|
||||
mmio_base = device->func->resource_addr(device, 0);
|
||||
mmio_size = device->func->resource_size(device, 0);
|
||||
|
||||
if (detect || mmio) {
|
||||
device->pri = ioremap(mmio_base, mmio_size);
|
||||
if (device->pri == NULL) {
|
||||
nvdev_error(device, "unable to map PRI\n");
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
/* identify the chipset, and determine classes of subdev/engines */
|
||||
if (detect) {
|
||||
map = ioremap(mmio_base, 0x102000);
|
||||
if (ret = -ENOMEM, map == NULL)
|
||||
goto done;
|
||||
|
||||
/* switch mmio to cpu's native endianness */
|
||||
#ifndef __BIG_ENDIAN
|
||||
if (ioread32_native(map + 0x000004) != 0x00000000) {
|
||||
#else
|
||||
if (ioread32_native(map + 0x000004) == 0x00000000) {
|
||||
#endif
|
||||
iowrite32_native(0x01000001, map + 0x000004);
|
||||
ioread32_native(map);
|
||||
if (!nvkm_device_endianness(device)) {
|
||||
nvkm_wr32(device, 0x000004, 0x01000001);
|
||||
nvkm_rd32(device, 0x000000);
|
||||
if (!nvkm_device_endianness(device)) {
|
||||
nvdev_error(device,
|
||||
"GPU not supported on big-endian\n");
|
||||
ret = -ENOSYS;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
/* read boot0 and strapping information */
|
||||
boot0 = ioread32_native(map + 0x000000);
|
||||
strap = ioread32_native(map + 0x101000);
|
||||
iounmap(map);
|
||||
boot0 = nvkm_rd32(device, 0x000000);
|
||||
|
||||
/* chipset can be overridden for devel/testing purposes */
|
||||
chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
|
||||
@ -3138,6 +3155,17 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
|
||||
nvdev_info(device, "NVIDIA %s (%08x)\n",
|
||||
device->chip->name, boot0);
|
||||
|
||||
/* vGPU detection */
|
||||
boot1 = nvkm_rd32(device, 0x0000004);
|
||||
if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
|
||||
nvdev_info(device, "vGPUs are not supported\n");
|
||||
ret = -ENODEV;
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* read strapping information */
|
||||
strap = nvkm_rd32(device, 0x101000);
|
||||
|
||||
/* determine frequency of timing crystal */
|
||||
if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
|
||||
(device->chipset >= 0x20 && device->chipset < 0x25))
|
||||
@ -3158,15 +3186,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
|
||||
if (!device->name)
|
||||
device->name = device->chip->name;
|
||||
|
||||
if (mmio) {
|
||||
device->pri = ioremap(mmio_base, mmio_size);
|
||||
if (!device->pri) {
|
||||
nvdev_error(device, "unable to map PRI\n");
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_init(&device->mutex);
|
||||
|
||||
for (i = 0; i < NVKM_SUBDEV_NR; i++) {
|
||||
@ -3254,6 +3273,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
|
||||
|
||||
ret = 0;
|
||||
done:
|
||||
if (device->pri && (!mmio || ret)) {
|
||||
iounmap(device->pri);
|
||||
device->pri = NULL;
|
||||
}
|
||||
mutex_unlock(&nv_devices_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
@ -47,6 +47,7 @@ nvkm-y += nvkm/engine/disp/dp.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/hdagt215.o
|
||||
nvkm-y += nvkm/engine/disp/hdagf119.o
|
||||
nvkm-y += nvkm/engine/disp/hdagv100.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/hdmi.o
|
||||
nvkm-y += nvkm/engine/disp/hdmig84.o
|
||||
@ -74,6 +75,8 @@ nvkm-y += nvkm/engine/disp/rootgp102.o
|
||||
nvkm-y += nvkm/engine/disp/rootgv100.o
|
||||
nvkm-y += nvkm/engine/disp/roottu102.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/capsgv100.o
|
||||
|
||||
nvkm-y += nvkm/engine/disp/channv50.o
|
||||
nvkm-y += nvkm/engine/disp/changf119.o
|
||||
nvkm-y += nvkm/engine/disp/changv100.o
|
||||
|
60
drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c
Normal file
60
drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c
Normal file
@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright 2020 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#define gv100_disp_caps(p) container_of((p), struct gv100_disp_caps, object)
|
||||
#include "rootnv50.h"
|
||||
|
||||
struct gv100_disp_caps {
|
||||
struct nvkm_object object;
|
||||
struct nv50_disp *disp;
|
||||
};
|
||||
|
||||
static int
|
||||
gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
|
||||
enum nvkm_object_map *type, u64 *addr, u64 *size)
|
||||
{
|
||||
struct gv100_disp_caps *caps = gv100_disp_caps(object);
|
||||
struct nvkm_device *device = caps->disp->base.engine.subdev.device;
|
||||
*type = NVKM_OBJECT_MAP_IO;
|
||||
*addr = 0x640000 + device->func->resource_addr(device, 0);
|
||||
*size = 0x1000;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct nvkm_object_func
|
||||
gv100_disp_caps = {
|
||||
.map = gv100_disp_caps_map,
|
||||
};
|
||||
|
||||
int
|
||||
gv100_disp_caps_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
|
||||
struct nv50_disp *disp, struct nvkm_object **pobject)
|
||||
{
|
||||
struct gv100_disp_caps *caps;
|
||||
|
||||
if (!(caps = kzalloc(sizeof(*caps), GFP_KERNEL)))
|
||||
return -ENOMEM;
|
||||
*pobject = &caps->object;
|
||||
|
||||
nvkm_object_ctor(&gv100_disp_caps, oclass, &caps->object);
|
||||
caps->disp = disp;
|
||||
return 0;
|
||||
}
|
@ -24,10 +24,18 @@
|
||||
#include "ior.h"
|
||||
|
||||
void
|
||||
gf119_hda_eld(struct nvkm_ior *ior, u8 *data, u8 size)
|
||||
gf119_hda_device_entry(struct nvkm_ior *ior, int head)
|
||||
{
|
||||
struct nvkm_device *device = ior->disp->engine.subdev.device;
|
||||
const u32 soff = 0x030 * ior->id;
|
||||
const u32 hoff = 0x800 * head;
|
||||
nvkm_mask(device, 0x616548 + hoff, 0x00000070, head << 4);
|
||||
}
|
||||
|
||||
void
|
||||
gf119_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
|
||||
{
|
||||
struct nvkm_device *device = ior->disp->engine.subdev.device;
|
||||
const u32 soff = 0x030 * ior->id + (head * 0x04);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < size; i++)
|
||||
@ -41,14 +49,14 @@ void
|
||||
gf119_hda_hpd(struct nvkm_ior *ior, int head, bool present)
|
||||
{
|
||||
struct nvkm_device *device = ior->disp->engine.subdev.device;
|
||||
const u32 hoff = 0x800 * head;
|
||||
const u32 soff = 0x030 * ior->id + (head * 0x04);
|
||||
u32 data = 0x80000000;
|
||||
u32 mask = 0x80000001;
|
||||
if (present) {
|
||||
nvkm_mask(device, 0x616548 + hoff, 0x00000070, 0x00000000);
|
||||
ior->func->hda.device_entry(ior, head);
|
||||
data |= 0x00000001;
|
||||
} else {
|
||||
mask |= 0x00000002;
|
||||
}
|
||||
nvkm_mask(device, 0x10ec10 + ior->id * 0x030, mask, data);
|
||||
nvkm_mask(device, 0x10ec10 + soff, mask, data);
|
||||
}
|
||||
|
@ -24,7 +24,7 @@
|
||||
#include "ior.h"
|
||||
|
||||
void
|
||||
gt215_hda_eld(struct nvkm_ior *ior, u8 *data, u8 size)
|
||||
gt215_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
|
||||
{
|
||||
struct nvkm_device *device = ior->disp->engine.subdev.device;
|
||||
const u32 soff = ior->id * 0x800;
|
||||
|
30
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c
Normal file
30
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c
Normal file
@ -0,0 +1,30 @@
|
||||
/*
|
||||
* Copyright 2020 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#include "ior.h"
|
||||
|
||||
void
|
||||
gv100_hda_device_entry(struct nvkm_ior *ior, int head)
|
||||
{
|
||||
struct nvkm_device *device = ior->disp->engine.subdev.device;
|
||||
const u32 hoff = 0x800 * head;
|
||||
nvkm_mask(device, 0x616528 + hoff, 0x00000070, head << 4);
|
||||
}
|
@ -87,7 +87,8 @@ struct nvkm_ior_func {
|
||||
|
||||
struct {
|
||||
void (*hpd)(struct nvkm_ior *, int head, bool present);
|
||||
void (*eld)(struct nvkm_ior *, u8 *data, u8 size);
|
||||
void (*eld)(struct nvkm_ior *, int head, u8 *data, u8 size);
|
||||
void (*device_entry)(struct nvkm_ior *, int head);
|
||||
} hda;
|
||||
};
|
||||
|
||||
@ -158,10 +159,13 @@ void gv100_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
|
||||
void gm200_hdmi_scdc(struct nvkm_ior *, int, u8);

void gt215_hda_hpd(struct nvkm_ior *, int, bool);
void gt215_hda_eld(struct nvkm_ior *, u8 *, u8);
void gt215_hda_eld(struct nvkm_ior *, int, u8 *, u8);

void gf119_hda_hpd(struct nvkm_ior *, int, bool);
void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
void gf119_hda_eld(struct nvkm_ior *, int, u8 *, u8);
void gf119_hda_device_entry(struct nvkm_ior *, int);

void gv100_hda_device_entry(struct nvkm_ior *, int);

#define IOR_MSG(i,l,f,a...) do { \
struct nvkm_ior *_ior = (i); \

@ -27,6 +27,7 @@
static const struct nv50_disp_root_func
gv100_disp_root = {
.user = {
{{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{0,0,GV100_DISP_CURSOR }, gv100_disp_curs_new },
{{0,0,GV100_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
{{0,0,GV100_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },

@ -155,7 +155,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp.audio(ior, hidx, true);
ior->func->hda.hpd(ior, hidx, true);
ior->func->hda.eld(ior, data, size);
ior->func->hda.eld(ior, hidx, data, size);
} else {
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp.audio(ior, hidx, false);

@ -24,6 +24,9 @@ int nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);

int gv100_disp_caps_new(const struct nvkm_oclass *, void *, u32,
struct nv50_disp *, struct nvkm_object **);

extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
extern const struct nvkm_disp_oclass g84_disp_root_oclass;
extern const struct nvkm_disp_oclass g94_disp_root_oclass;

@ -27,6 +27,7 @@
static const struct nv50_disp_root_func
tu102_disp_root = {
.user = {
{{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{0,0,TU102_DISP_CURSOR }, gv100_disp_curs_new },
{{0,0,TU102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
{{0,0,TU102_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },

@ -177,6 +177,7 @@ gf119_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
.device_entry = gf119_hda_device_entry,
},
};

@ -43,6 +43,7 @@ gk104_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
.device_entry = gf119_hda_device_entry,
},
};

@ -57,6 +57,7 @@ gm107_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
.device_entry = gf119_hda_device_entry,
},
};

@ -115,6 +115,7 @@ gm200_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
.device_entry = gf119_hda_device_entry,
},
};

@ -103,6 +103,7 @@ gv100_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
.device_entry = gv100_hda_device_entry,
},
};

@ -88,6 +88,7 @@ tu102_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
.device_entry = gv100_hda_device_entry,
},
};

@ -319,6 +319,17 @@ gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
return 0;
}

#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
#endif

static int
gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{

@ -250,6 +250,11 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
list_add_tail(&lsf->head, &acr->lsf);
}

/* Ensure the falcon that'll provide ACR functions is booted first. */
lsf = nvkm_acr_falcon(device);
if (lsf)
list_move(&lsf->head, &acr->lsf);

if (!acr->wpr_fw || acr->wpr_comp)
wpr_size = acr->func->wpr_layout(acr);
@ -100,25 +100,21 @@ nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver,
hsfw->data_size = lhdr->data_size;

hsfw->sig.prod.size = fwhdr->sig_prod_size;
hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL);
hsfw->sig.prod.data = kmemdup(fw->data + fwhdr->sig_prod_offset + sig,
hsfw->sig.prod.size, GFP_KERNEL);
if (!hsfw->sig.prod.data) {
ret = -ENOMEM;
goto done;
}

memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig,
hsfw->sig.prod.size);

hsfw->sig.dbg.size = fwhdr->sig_dbg_size;
hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL);
hsfw->sig.dbg.data = kmemdup(fw->data + fwhdr->sig_dbg_offset + sig,
hsfw->sig.dbg.size, GFP_KERNEL);
if (!hsfw->sig.dbg.data) {
ret = -ENOMEM;
goto done;
}

memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig,
hsfw->sig.dbg.size);

hsfw->sig.patch_loc = loc;
done:
nvkm_firmware_put(fw);
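The two hunks above replace an open-coded kmalloc() + memcpy() pair with a single kmemdup() call, which allocates and copies in one step and leaves only the NULL check behind. A minimal user-space sketch of the same transformation (memdup() and duplicate_sig() are hypothetical stand-ins, not nouveau code):

#include <stdlib.h>
#include <string.h>

/* User-space stand-in for the kernel's kmemdup(): allocate len bytes and
 * copy them from src in one step; returns NULL on allocation failure. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

struct sig { void *data; size_t size; };

/* Before: data = malloc(size); if (!data) ...; memcpy(data, src, size);
 * After, as in the hunks above, one call does both: */
static int duplicate_sig(struct sig *sig, const unsigned char *fw_data,
			 size_t offset, size_t size)
{
	sig->size = size;
	sig->data = memdup(fw_data + offset, size);
	return sig->data ? 0 : -1; /* -ENOMEM in the kernel version */
}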
@ -22,22 +22,39 @@
*/
#include "priv.h"

static int
acpi_read_bios(acpi_handle rom_handle, u8 *bios, u32 offset, u32 length)
{
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct device *);
#else
static inline bool
nouveau_acpi_rom_supported(struct device *dev)
{
return false;
}
acpi_status status;
union acpi_object rom_arg_elements[2], *obj;
struct acpi_object_list rom_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};

static inline int
nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
{
rom_arg.count = 2;
rom_arg.pointer = &rom_arg_elements[0];

rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
rom_arg_elements[0].integer.value = offset;

rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
rom_arg_elements[1].integer.value = length;

status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
if (ACPI_FAILURE(status)) {
pr_info("failed to evaluate ROM got %s\n",
acpi_format_exception(status));
return -ENODEV;
}
obj = (union acpi_object *)buffer.pointer;
length = min(length, obj->buffer.length);
memcpy(bios+offset, obj->buffer.pointer, length);
kfree(buffer.pointer);
return length;
#else
return -EINVAL;
}
#endif
}

/* This version of the shadow function disobeys the ACPI spec and tries
* to fetch in units of more than 4KiB at a time. This is a LOT faster
@ -51,7 +68,7 @@ acpi_read_fast(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
u32 fetch = limit - start;

if (nvbios_extend(bios, limit) >= 0) {
int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
int ret = acpi_read_bios(data, bios->data, start, fetch);
if (ret == fetch)
return fetch;
}

@ -73,9 +90,8 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)

if (nvbios_extend(bios, limit) >= 0) {
while (start + fetch < limit) {
int ret = nouveau_acpi_get_bios_chunk(bios->data,
start + fetch,
0x1000);
int ret = acpi_read_bios(data, bios->data,
start + fetch, 0x1000);
if (ret != 0x1000)
break;
fetch += 0x1000;
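Together, acpi_read_fast() and acpi_read_slow() implement the fetch strategy the comment above describes: try one large _ROM read first, and if the firmware returns short, fall back to re-reading the window in spec-compliant 4KiB chunks. A self-contained sketch of that control flow (read_chunk_fn is a hypothetical callback type, not the nouveau interface):

#include <stddef.h>

/* Hypothetical chunk reader: writes len bytes at dst + offset and
 * returns the number of bytes actually read. */
typedef int (*read_chunk_fn)(void *ctx, unsigned char *dst,
			     size_t offset, size_t len);

/* Try one large fetch first (fast path); if the backend comes back short,
 * re-read the same window 4KiB at a time until a chunk falls short. */
static size_t shadow_window(read_chunk_fn read_chunk, void *ctx,
			    unsigned char *dst, size_t start, size_t limit)
{
	size_t fetch = limit - start;

	if (read_chunk(ctx, dst, start, fetch) == (int)fetch)
		return fetch; /* fast path succeeded */

	for (fetch = 0; start + fetch < limit; fetch += 0x1000) {
		if (read_chunk(ctx, dst, start + fetch, 0x1000) != 0x1000)
			break; /* short read: stop here */
	}
	return fetch;
}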
@ -88,9 +104,22 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
static void *
acpi_init(struct nvkm_bios *bios, const char *name)
{
if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
acpi_status status;
acpi_handle dhandle, rom_handle;

dhandle = ACPI_HANDLE(bios->subdev.device->dev);
if (!dhandle)
return ERR_PTR(-ENODEV);
return NULL;

status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
if (ACPI_FAILURE(status))
return ERR_PTR(-ENODEV);

return rom_handle;
#else
return ERR_PTR(-ENODEV);
#endif
}

const struct nvbios_source
@ -114,9 +114,5 @@ int
gf100_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&gf100_ibus, device, index, ibus);
return 0;
return nvkm_subdev_new_(&gf100_ibus, device, index, pibus);
}

@ -43,9 +43,5 @@ int
gf117_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&gf117_ibus, device, index, ibus);
return 0;
return nvkm_subdev_new_(&gf117_ibus, device, index, pibus);
}

@ -117,9 +117,5 @@ int
gk104_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&gk104_ibus, device, index, ibus);
return 0;
return nvkm_subdev_new_(&gk104_ibus, device, index, pibus);
}

@ -81,9 +81,5 @@ int
gk20a_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&gk20a_ibus, device, index, ibus);
return 0;
return nvkm_subdev_new_(&gk20a_ibus, device, index, pibus);
}

@ -32,9 +32,5 @@ int
gm200_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&gm200_ibus, device, index, ibus);
return 0;
return nvkm_subdev_new_(&gm200_ibus, device, index, pibus);
}

@ -51,9 +51,5 @@ int
gp10b_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&gp10b_ibus, device, index, ibus);
return 0;
return nvkm_subdev_new_(&gp10b_ibus, device, index, pibus);
}
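All six ibus hunks make the same cleanup: the repeated kzalloc() + nvkm_subdev_ctor() boilerplate moves behind a shared nvkm_subdev_new_() helper, reducing each per-chip constructor to a one-liner. A self-contained sketch of that consolidation pattern (the struct layouts and helper below are illustrative stand-ins, not nouveau's actual definitions):

#include <stdlib.h>

struct device; /* opaque, illustrative */

struct subdev_func { const char *name; };

struct subdev {
	const struct subdev_func *func;
	struct device *dev;
	int index;
};

static void subdev_ctor(const struct subdev_func *func, struct device *dev,
			int index, struct subdev *sd)
{
	sd->func = func;
	sd->dev = dev;
	sd->index = index;
}

/* Shared helper: allocate zeroed, construct, publish through *psd. Each
 * per-chip _new() then shrinks to a single call with its function table. */
static int subdev_new_(const struct subdev_func *func, struct device *dev,
		       int index, struct subdev **psd)
{
	if (!(*psd = calloc(1, sizeof(**psd))))
		return -1; /* -ENOMEM in the kernel */
	subdev_ctor(func, dev, index, *psd);
	return 0;
}

static const struct subdev_func gf100_ibus_sketch = { "ibus" };

static int gf100_ibus_new_sketch(struct device *dev, int index,
				 struct subdev **psd)
{
	return subdev_new_(&gf100_ibus_sketch, dev, index, psd);
}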
@ -580,7 +580,7 @@ nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
it.pte[it.lvl]++;
}
}
};
}

nvkm_vmm_flush(&it);
return ~0ULL;

@ -304,7 +304,7 @@ int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
FILL(VMM, PT, PTEI, _ptes, MAP, _addr); \
PTEI += _ptes; \
PTEN -= _ptes; \
}; \
} \
nvkm_done((PT)->memory); \
} while(0)
@ -527,7 +527,113 @@ extern "C" {
#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)

/*
* 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later
* Generalized Block Linear layout, used by desktop GPUs starting with NV50/G80,
* and Tegra GPUs starting with Tegra K1.
*
* Pixels are arranged in Groups of Bytes (GOBs). GOB size and layout varies
* based on the architecture generation. GOBs themselves are then arranged in
* 3D blocks, with the block dimensions (in terms of GOBs) always being a power
* of two, and hence expressible as their log2 equivalent (E.g., "2" represents
* a block depth or height of "4").
*
* Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
* in full detail.
*
* Macro
* Bits Param Description
* ---- ----- -----------------------------------------------------------------
*
* 3:0 h log2(height) of each block, in GOBs. Placed here for
* compatibility with the existing
* DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
*
* 4:4 - Must be 1, to indicate block-linear layout. Necessary for
* compatibility with the existing
* DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
*
* 8:5 - Reserved (To support 3D-surfaces with variable log2(depth) block
* size). Must be zero.
*
* Note there is no log2(width) parameter. Some portions of the
* hardware support a block width of two GOBs, but it is impractical
* to use due to lack of support elsewhere, and has no known
* benefits.
*
* 11:9 - Reserved (To support 2D-array textures with variable array stride
* in blocks, specified via log2(tile width in blocks)). Must be
* zero.
*
* 19:12 k Page Kind. This value directly maps to a field in the page
* tables of all GPUs >= NV50. It affects the exact layout of bits
* in memory and can be derived from the tuple
*
* (format, GPU model, compression type, samples per pixel)
*
* where compression type is defined below. If the GPU model were
* implied by the format modifier, format, or memory buffer, page
* kind would not need to be included in the modifier itself, but
* since the modifier should define the layout of the associated
* memory buffer independent from any device or other context, it
* must be included here.
*
* 21:20 g GOB Height and Page Kind Generation. The height of a GOB changed
* starting with Fermi GPUs. Additionally, the mapping between page
* kind and bit layout has changed at various points.
*
* 0 = GOB height 8, Fermi - Volta, Tegra K1+ page kind mapping
* 1 = GOB height 4, G80 - GT2XX page kind mapping
* 2 = GOB height 8, Turing+ page kind mapping
* 3 = Reserved for future use.
*
* 22:22 s Sector layout. On Tegra GPUs prior to Xavier, there is a further
* bit remapping step that occurs at an even lower level than the
* page kind and block linear swizzles. This causes the layout of
* surfaces mapped in those SoCs' GPUs to be incompatible with the
* equivalent mapping on other GPUs in the same system.
*
* 0 = Tegra K1 - Tegra Parker/TX2 layout.
* 1 = Desktop GPU and Tegra Xavier+ layout
*
* 25:23 c Lossless Framebuffer Compression type.
*
* 0 = none
* 1 = ROP/3D, layout 1, exact compression format implied by Page
* Kind field
* 2 = ROP/3D, layout 2, exact compression format implied by Page
* Kind field
* 3 = CDE horizontal
* 4 = CDE vertical
* 5 = Reserved for future use
* 6 = Reserved for future use
* 7 = Reserved for future use
*
* 55:26 - Reserved for future use. Must be zero.
*/
#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
fourcc_mod_code(NVIDIA, (0x10 | \
((h) & 0xf) | \
(((k) & 0xff) << 12) | \
(((g) & 0x3) << 20) | \
(((s) & 0x1) << 22) | \
(((c) & 0x7) << 23)))

/* To grandfather in prior block linear format modifiers to the above layout,
* the page kind "0", which corresponds to "pitch/linear" and hence is unusable
* with block-linear layouts, is remapped within drivers to the value 0xfe,
* which corresponds to the "generic" kind used for simple single-sample
* uncompressed color formats on Fermi - Volta GPUs.
*/
static inline __u64
drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
{
if (!(modifier & 0x10) || (modifier & (0xff << 12)))
return modifier;
else
return modifier | (0xfe << 12);
}
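To make the bit layout documented above concrete, here is a stand-alone sketch that builds a modifier with the new macro and runs it through the page-kind canonicalization. The vendor code (0x03) and fourcc_mod_code() definition are reproduced on the assumption they match the upstream drm_fourcc.h, and the page kind 0x06 is purely illustrative:

#include <stdio.h>

typedef unsigned long long u64;

/* Assumed to match drm_fourcc.h: 8-bit vendor code in bits 63:56,
 * 56-bit vendor-defined value below it. */
#define DRM_FORMAT_MOD_VENDOR_NVIDIA 0x03
#define fourcc_mod_code(vendor, val) \
	((((u64)DRM_FORMAT_MOD_VENDOR_##vendor) << 56) | \
	 ((val) & 0x00ffffffffffffffULL))

#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
	fourcc_mod_code(NVIDIA, (0x10 | ((h) & 0xf) | (((k) & 0xff) << 12) | \
				 (((g) & 0x3) << 20) | (((s) & 0x1) << 22) | \
				 (((c) & 0x7) << 23)))

/* Mirror of drm_fourcc_canonicalize_nvidia_format_mod() above: legacy
 * modifiers carry page kind 0, which is remapped to the generic kind 0xfe. */
static u64 canonicalize(u64 modifier)
{
	if (!(modifier & 0x10) || (modifier & (0xffULL << 12)))
		return modifier;
	return modifier | (0xfeULL << 12);
}

int main(void)
{
	/* No compression (c=0), desktop sector layout (s=1), Turing page-kind
	 * generation (g=2), page kind 0x06, blocks 4 GOBs high (h=log2(4)=2). */
	u64 mod = DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2);
	u64 legacy = DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, 2);

	printf("modifier: 0x%016llx\n", mod);
	printf("legacy:   0x%016llx -> 0x%016llx\n", legacy, canonicalize(legacy));
	return 0;
}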
/*
* 16Bx2 Block Linear layout, used by Tegra K1 and later
*
* Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
* vertically by a power of 2 (1 to 32 GOBs) to form a block.

@ -548,20 +654,20 @@ extern "C" {
* in full detail.
*/
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, (v))

#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
fourcc_mod_code(NVIDIA, 0x10)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
fourcc_mod_code(NVIDIA, 0x11)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
fourcc_mod_code(NVIDIA, 0x12)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
fourcc_mod_code(NVIDIA, 0x13)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
fourcc_mod_code(NVIDIA, 0x14)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
fourcc_mod_code(NVIDIA, 0x15)
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5)
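Because the legacy 16BX2 macros are now expressed via DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(), their numeric values must stay bit-identical to the old fourcc_mod_code(NVIDIA, 0x10 | v) encoding: h lands in bits 3:0, bit 4 stays set, and every other field is zero. A compile-time check of that equivalence, reusing the assumed definitions from the sketch above (C11 static_assert):

#include <assert.h>

/* The old encoding, spelled out directly for comparison. */
#define LEGACY_16BX2_BLOCK(v) fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))

static_assert(DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, 0) ==
	      LEGACY_16BX2_BLOCK(0), "ONE_GOB encoding changed");
static_assert(DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, 5) ==
	      LEGACY_16BX2_BLOCK(5), "THIRTYTWO_GOB encoding changed");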
/*
* Some Broadcom modifiers take parameters, for example the number of