mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-21 18:07:21 +07:00
59e7a8cc2d
A bit smaller this time around.. there are still a couple uabi additions for vulkan waiting in the wings, but I punted on them this cycle due to running low on time. (They should be easy enough to rebase, and if it is a problem for anyone I can push a next+uabi branch so that tu work can proceed.) The bigger change is refactoring dpu resource manager and moving dpu to use atomic global state. Other than that, it is mostly cleanups and fixes.
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGuf1R4Xz-t9Z7_cwx9jD=b4wUvvwfqA5cHR8fCSXSd5XQ@mail.gmail.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
1381 lines
32 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "adreno/adreno_gpu.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	5
#define MSM_VERSION_PATCHLEVEL	0

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

/*
 * Util/helpers:
 */

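/*
 * Look up a single clock in a clk_bulk array, accepting either the plain
 * name or the legacy "<name>_clk" form used by older device trees.
 */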
struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
		const char *name)
{
	int i;
	char n[32];

	snprintf(n, sizeof(n), "%s_clk", name);

	for (i = 0; bulk && i < count; i++) {
		if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
			return bulk[i].clk;
	}

	return NULL;
}

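/*
 * Get a clock by name, falling back (with a warning) to the legacy
 * "<name>_clk" binding if the plain name is not found.
 */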
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
	struct clk *clk;
	char name2[32];

	clk = devm_clk_get(&pdev->dev, name);
	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	snprintf(name2, sizeof(name2), "%s_clk", name);

	clk = devm_clk_get(&pdev->dev, name2);
	if (!IS_ERR(clk))
		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
				"\"%s\" instead of \"%s\"\n", name, name2);

	return clk;
}

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap(&pdev->dev, res->start, size);
	if (!ptr) {
		DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}

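/*
 * Register access helpers: when the reglog module param is set, each
 * read/write is also traced to the kernel log.
 */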
void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		pr_err("IO:R %p %08x\n", addr, val);
	return val;
}

struct msm_vblank_work {
	struct work_struct work;
	int crtc_id;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
	else
		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

	kfree(vbl_work);
}

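/*
 * Defer the actual enable/disable to the "msm" workqueue: this can be
 * called from atomic context (hence GFP_ATOMIC), while the kms vblank
 * hooks may sleep.
 */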
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc_id = crtc_id;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->wq, &vbl_work->work);

	return 0;
}

static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_mdss *mdss = priv->mdss;
	int i;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */

	flush_workqueue(priv->wq);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].thread) {
			kthread_destroy_worker(&priv->event_thread[i].worker);
			priv->event_thread[i].thread = NULL;
		}
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif

	drm_mode_config_cleanup(ddev);

	pm_runtime_get_sync(dev);
	drm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);
	kfree(priv);

	return 0;
}

#define KMS_MDP4 4
#define KMS_MDP5 5
#define KMS_DPU  3

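/*
 * The KMS generation is encoded as the of_device_id match data; see the
 * dt_match[] table at the bottom of this file.
 */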
static int get_mdp_ver(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	return (int) (unsigned long) of_device_get_match_data(dev);
}

bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* a2xx comes with its own MMU */
	return priv->is_a2xx || iommu_present(&platform_bus_type);
}

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go.  There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;

		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}

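/*
 * Bring up the device: allocate the drm_device and driver private state,
 * init the mdss top-level, bind all sub-components, set up VRAM and the
 * kms backend, spawn per-crtc event threads, and finally register the
 * device. On failure we unwind through msm_drm_uninit().
 */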
static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	struct msm_mdss *mdss;
	int ret, i;
	struct sched_param param;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}

	platform_set_drvdata(pdev, ddev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_put_drm_dev;
	}

	ddev->dev_private = priv;
	priv->dev = ddev;

	switch (get_mdp_ver(pdev)) {
	case KMS_MDP5:
		ret = mdp5_mdss_init(ddev);
		break;
	case KMS_DPU:
		ret = dpu_mdss_init(ddev);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret)
		goto err_free_priv;

	mdss = priv->mdss;

	priv->wq = alloc_ordered_workqueue("msm", 0);

	INIT_WORK(&priv->free_work, msm_gem_free_work);
	init_llist_head(&priv->free_list);

	INIT_LIST_HEAD(&priv->inactive_list);

	drm_mode_config_init(ddev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_destroy_mdss;

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_msm_uninit;

	if (!dev->dma_parms) {
		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
					      GFP_KERNEL);
		if (!dev->dma_parms) {
			ret = -ENOMEM;
			goto err_msm_uninit;
		}
	}
	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));

	msm_gem_shrinker_init(ddev);

	switch (get_mdp_ver(pdev)) {
	case KMS_MDP4:
		kms = mdp4_kms_init(ddev);
		priv->kms = kms;
		break;
	case KMS_MDP5:
		kms = mdp5_kms_init(ddev);
		break;
	case KMS_DPU:
		kms = dpu_kms_init(ddev);
		priv->kms = kms;
		break;
	default:
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		kms = NULL;
		break;
	}

	if (IS_ERR(kms)) {
		DRM_DEV_ERROR(dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		priv->kms = NULL;
		goto err_msm_uninit;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	if (kms) {
		kms->dev = ddev;
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
			goto err_msm_uninit;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	/*
	 * this priority was found during empiric testing to have appropriate
	 * realtime scheduling to process display updates and interact with
	 * other real time and normal priority tasks
	 */
	param.sched_priority = 16;
	for (i = 0; i < priv->num_crtcs; i++) {
		/* initialize event thread */
		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
		kthread_init_worker(&priv->event_thread[i].worker);
		priv->event_thread[i].dev = ddev;
		priv->event_thread[i].thread =
			kthread_run(kthread_worker_fn,
				&priv->event_thread[i].worker,
				"crtc_event:%d", priv->event_thread[i].crtc_id);
		if (IS_ERR(priv->event_thread[i].thread)) {
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			ret = PTR_ERR(priv->event_thread[i].thread);
			priv->event_thread[i].thread = NULL;
			goto err_msm_uninit;
		}

		ret = sched_setscheduler(priv->event_thread[i].thread,
					 SCHED_FIFO, &param);
		if (ret)
			dev_warn(dev, "event_thread set priority failed:%d\n",
				 ret);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = drm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
			goto err_msm_uninit;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (kms && fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	drm_kms_helper_poll_init(ddev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);
	return ret;
err_destroy_mdss:
	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);
err_free_priv:
	kfree(priv);
err_put_drm_dev:
	drm_dev_put(ddev);
	return ret;
}

/*
 * DRM operations:
 */

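/*
 * Lazily create the gpu on first use, serialized by init_lock so that
 * concurrent opens only load it once.
 */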
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

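/*
 * Per-open driver state: each drm_file gets its own submitqueues and
 * points at the gpu address space.
 */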
static int context_init(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	msm_submitqueue_init(dev, ctx);

	ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
	file->driver_priv = ctx;

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	kfree(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	context_close(ctx);
}

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_uninstall(kms);
}

int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	DBG("dev=%p, crtc=%u", dev, pipe);

	return vblank_ctrl_queue_work(priv, pipe, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	DBG("dev=%p, crtc=%u", dev, pipe);

	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_gem_object *obj, uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return -EINVAL;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put_unlocked(obj);

	return ret;
}

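/*
 * Wait on the fence identified by (queueid, fence): look up the fence
 * context of the submitqueue's ring and wait there. With no gpu there is
 * nothing to wait on, so this succeeds trivially.
 */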
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);
	struct msm_gpu_submitqueue *queue;
	struct msm_gpu *gpu = priv->gpu;
	int ret;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
			     true);

	msm_submitqueue_put(queue);

	return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
			args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.gem_free_object_unlocked = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

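/*
 * System sleep: save the full atomic state on suspend and restore it on
 * resume, so the display comes back in the same configuration.
 */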
#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;

	if (WARN_ON(priv->pm_state))
		drm_atomic_state_put(priv->pm_state);

	priv->pm_state = drm_atomic_helper_suspend(ddev);
	if (IS_ERR(priv->pm_state)) {
		int ret = PTR_ERR(priv->pm_state);

		DRM_ERROR("Failed to suspend dpu, %d\n", ret);
		return ret;
	}

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	int ret;

	if (WARN_ON(!priv->pm_state))
		return -ENOENT;

	ret = drm_atomic_helper_resume(ddev, priv->pm_state);
	if (!ret)
		priv->pm_state = NULL;

	return ret;
}
#endif

#ifdef CONFIG_PM
static int msm_runtime_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->disable(mdss);

	return 0;
}

static int msm_runtime_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->enable(mdss);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: duplication of the same code as exynos or imx (or probably any
 * other), so there is probably some room for helpers here.
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}

static int compare_name_mdp(struct device *dev, void *data)
{
	return (strstr(dev_name(dev), "mdp") != NULL);
}

static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") ||
	    of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		put_device(mdp_dev);

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}

/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add
 * them as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

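/*
 * Collect the display and gpu components for this platform into a
 * component_match list, then register ourselves as the component master;
 * msm_drm_bind() runs once all matched components have probed.
 */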
static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	if (get_mdp_ver(pdev)) {
		ret = add_display_components(&pdev->dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		goto fail;

	/* On all devices that I am aware of, IOMMUs which can map any
	 * address the CPU can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		goto fail;

	ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
	if (ret)
		goto fail;

	return 0;

fail:
	of_platform_depopulate(&pdev->dev);
	return ret;
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
	{ .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
};

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");