/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_KMS_H__
#define __MSM_KMS_H__

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"

#define MAX_PLANE	4

/* As there are different display controller blocks depending on the
 * snapdragon version, the kms support is split out and the appropriate
 * implementation is loaded at runtime.  The kms module is responsible
 * for constructing the appropriate planes/crtcs/encoders/connectors.
 */
struct msm_kms_funcs {
|
|
|
|
/* hw initialization: */
|
|
|
|
int (*hw_init)(struct msm_kms *kms);
|
|
|
|
/* irq handling: */
|
|
|
|
void (*irq_preinstall)(struct msm_kms *kms);
|
|
|
|
int (*irq_postinstall)(struct msm_kms *kms);
|
|
|
|
void (*irq_uninstall)(struct msm_kms *kms);
|
|
|
|
irqreturn_t (*irq)(struct msm_kms *kms);
|
|
|
|
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
|
|
|
|
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
|
2019-08-29 23:45:14 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Atomic commit handling:
|
2019-08-29 23:45:16 +07:00
|
|
|
*
|
|
|
|
* Note that in the case of async commits, the funcs which take
|
|
|
|
* a crtc_mask (ie. ->flush_commit(), and ->complete_commit())
|
|
|
|
* might not be evenly balanced with ->prepare_commit(), however
|
|
|
|
* each crtc that effected by a ->prepare_commit() (potentially
|
|
|
|
* multiple times) will eventually (at end of vsync period) be
|
|
|
|
* flushed and completed.
|
|
|
|
*
|
|
|
|
* This has some implications about tracking of cleanup state,
|
|
|
|
* for example SMP blocks to release after commit completes. Ie.
|
|
|
|
* cleanup state should be also duplicated in the various
|
|
|
|
* duplicate_state() methods, as the current cleanup state at
|
|
|
|
* ->complete_commit() time may have accumulated cleanup work
|
|
|
|
* from multiple commits.
|
2019-08-29 23:45:14 +07:00
|
|
|
*/
|
|
|
|
|
2019-08-29 23:45:15 +07:00
|
|
|
/**
|
|
|
|
* Enable/disable power/clks needed for hw access done in other
|
|
|
|
* commit related methods.
|
|
|
|
*
|
|
|
|
* If mdp4 is migrated to runpm, we could probably drop these
|
|
|
|
* and use runpm directly.
|
|
|
|
*/
|
|
|
|
void (*enable_commit)(struct msm_kms *kms);
|
|
|
|
void (*disable_commit)(struct msm_kms *kms);
|
|
|
|
|
2019-08-29 23:45:16 +07:00
|
|
|
/**
|
|
|
|
* If the kms backend supports async commit, it should implement
|
|
|
|
* this method to return the time of the next vsync. This is
|
|
|
|
* used to determine a time slightly before vsync, for the async
|
|
|
|
* commit timer to run and complete an async commit.
|
|
|
|
*/
|
|
|
|
ktime_t (*vsync_time)(struct msm_kms *kms, struct drm_crtc *crtc);
|
|
|
|
|
2019-08-29 23:45:14 +07:00
|
|
|
/**
|
|
|
|
* Prepare for atomic commit. This is called after any previous
|
|
|
|
* (async or otherwise) commit has completed.
|
|
|
|
*/
|
2015-01-31 05:04:45 +07:00
|
|
|
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
|
2019-08-29 23:45:14 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Flush an atomic commit. This is called after the hardware
|
|
|
|
* updates have already been pushed down to effected planes/
|
|
|
|
* crtcs/encoders/connectors.
|
|
|
|
*/
|
|
|
|
void (*flush_commit)(struct msm_kms *kms, unsigned crtc_mask);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Wait for any in-progress flush to complete on the specified
|
|
|
|
* crtcs. This should not block if there is no in-progress
|
|
|
|
* commit (ie. don't just wait for a vblank), as it will also
|
|
|
|
* be called before ->prepare_commit() to ensure any potential
|
|
|
|
* "async" commit has completed.
|
|
|
|
*/
|
2019-08-29 23:45:12 +07:00
|
|
|
void (*wait_flush)(struct msm_kms *kms, unsigned crtc_mask);
|
|
|
|
|
2019-08-29 23:45:14 +07:00
|
|
|
/**
|
|
|
|
* Clean up after commit is completed. This is called after
|
|
|
|
* ->wait_flush(), to give the backend a chance to do any
|
|
|
|
* post-commit cleanup.
|
|
|
|
*/
|
|
|
|
void (*complete_commit)(struct msm_kms *kms, unsigned crtc_mask);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Format handling:
|
|
|
|
*/
|
|
|
|
|
2018-02-14 00:42:44 +07:00
|
|
|
/* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
|
|
|
|
const struct msm_format *(*get_format)(struct msm_kms *kms,
|
|
|
|
const uint32_t format,
|
|
|
|
const uint64_t modifiers);
|
2018-06-28 02:26:09 +07:00
|
|
|
/* do format checking on format modified through fb_cmd2 modifiers */
|
|
|
|
int (*check_modified_format)(const struct msm_kms *kms,
|
|
|
|
const struct msm_format *msm_fmt,
|
|
|
|
const struct drm_mode_fb_cmd2 *cmd,
|
|
|
|
struct drm_gem_object **bos);
|
2019-08-29 23:45:14 +07:00
|
|
|
|
2013-12-01 04:12:10 +07:00
|
|
|
/* misc: */
|
|
|
|
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
|
|
|
|
struct drm_encoder *encoder);
|
2015-03-27 06:25:15 +07:00
|
|
|
int (*set_split_display)(struct msm_kms *kms,
|
|
|
|
struct drm_encoder *encoder,
|
|
|
|
struct drm_encoder *slave_encoder,
|
|
|
|
bool is_cmd_mode);
|
2016-12-05 16:54:53 +07:00
|
|
|
void (*set_encoder_mode)(struct msm_kms *kms,
|
|
|
|
struct drm_encoder *encoder,
|
|
|
|
bool cmd_mode);
|
2013-12-01 04:12:10 +07:00
|
|
|
/* cleanup: */
|
|
|
|
void (*destroy)(struct msm_kms *kms);
|
2016-10-27 01:06:55 +07:00
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
|
/* debugfs: */
|
|
|
|
int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
|
|
|
|
#endif
|
2013-12-01 04:12:10 +07:00
|
|
|
};
|
|
|
|
|
2019-08-29 23:45:16 +07:00
|
|
|
struct msm_kms;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A per-crtc timer for pending async atomic flushes. Scheduled to expire
|
|
|
|
* shortly before vblank to flush pending async updates.
|
|
|
|
*/
|
|
|
|
struct msm_pending_timer {
|
|
|
|
struct hrtimer timer;
|
|
|
|
struct work_struct work;
|
|
|
|
struct msm_kms *kms;
|
|
|
|
unsigned crtc_idx;
|
|
|
|
};
|
|
|
|
|
2013-12-01 04:12:10 +07:00
|
|
|
struct msm_kms {
|
|
|
|
const struct msm_kms_funcs *funcs;
|
2019-08-29 23:45:16 +07:00
|
|
|
struct drm_device *dev;
|
2016-05-18 16:36:03 +07:00
|
|
|
|
|
|
|
/* irq number to be passed on to drm_irq_install */
|
|
|
|
int irq;
|
2017-06-13 21:22:37 +07:00
|
|
|
|
|
|
|
/* mapper-id used to request GEM buffer mapped for scanout: */
|
|
|
|
struct msm_gem_address_space *aspace;
|
2019-08-29 23:45:16 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* For async commit, where ->flush_commit() and later happens
|
|
|
|
* from the crtc's pending_timer close to end of the frame:
|
|
|
|
*/
|
|
|
|
struct mutex commit_lock;
|
|
|
|
unsigned pending_crtc_mask;
|
|
|
|
struct msm_pending_timer pending_timers[MAX_CRTCS];
|
2013-12-01 04:12:10 +07:00
|
|
|
};
|
|
|
|
|
2013-12-01 05:24:22 +07:00
|
|
|
static inline void msm_kms_init(struct msm_kms *kms,
|
|
|
|
const struct msm_kms_funcs *funcs)
|
|
|
|
{
|
2019-08-29 23:45:16 +07:00
|
|
|
unsigned i;
|
|
|
|
|
|
|
|
mutex_init(&kms->commit_lock);
|
2013-12-01 05:24:22 +07:00
|
|
|
kms->funcs = funcs;
|
2019-08-29 23:45:16 +07:00
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
|
|
|
|
msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
|
2013-12-01 05:24:22 +07:00
|
|
|
}
|
|
|
|
|
2013-12-01 04:12:10 +07:00
|
|
|
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
|
drm/msm: add mdp5/apq8x74
Add support for the new MDP5 display controller block. The mapping
between parts of the display controller and KMS is:
plane -> PIPE{RGBn,VIGn} \
crtc -> LM (layer mixer) |-> MDP "device"
encoder -> INTF /
connector -> HDMI/DSI/eDP/etc --> other device(s)
Unlike MDP4, it appears we can get by with a single encoder, rather
than needing a different implementation for DTV, DSI, etc. (Ie. the
register interface is same, just different bases.)
Also unlike MDP4, all the IRQs for other blocks (HDMI, DSI, etc) are
routed through MDP.
And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
which blocks need to be allocated to the active pipes based on fetch
stride.
Signed-off-by: Rob Clark <robdclark@gmail.com>
2013-12-01 05:51:47 +07:00
|
|
|
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
|
2018-06-28 02:26:09 +07:00
|
|
|
struct msm_kms *dpu_kms_init(struct drm_device *dev);
|
2018-06-22 03:06:10 +07:00
|
|
|
|
|
|
|
struct msm_mdss_funcs {
|
|
|
|
int (*enable)(struct msm_mdss *mdss);
|
|
|
|
int (*disable)(struct msm_mdss *mdss);
|
|
|
|
void (*destroy)(struct drm_device *dev);
|
|
|
|
};
|
|
|
|
|
|
|
|
struct msm_mdss {
|
|
|
|
struct drm_device *dev;
|
|
|
|
const struct msm_mdss_funcs *funcs;
|
|
|
|
};
|
|
|
|
|
|
|
|
int mdp5_mdss_init(struct drm_device *dev);
|
2018-06-28 02:26:09 +07:00
|
|
|
int dpu_mdss_init(struct drm_device *dev);
|
2013-12-01 04:12:10 +07:00
|
|
|
|
2019-08-29 23:45:12 +07:00
|
|
|
#define for_each_crtc_mask(dev, crtc, crtc_mask) \
|
|
|
|
drm_for_each_crtc(crtc, dev) \
|
|
|
|
for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
|
|
|
|
|

#endif /* __MSM_KMS_H__ */