Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-27 06:05:12 +07:00
ef631f8250

The DSS IRQ number can be obtained from platform_get_irq(). This API in turn picks the right IRQ number for the HW IP from the hwmod database, so the hard-coded IRQ number can be removed. The IRQ is stored in dss_irq as part of the dss structure and freed in dss_exit().

Reviewed-by: Paul Walmsley <paul@pwsan.com>
Reviewed-by: Kevin Hilman <khilman@ti.com>
Tested-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Senthilvadivu Guruswamy <svadivu@ti.com>
Signed-off-by: Sumit Semwal <sumit.semwal@ti.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
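As a minimal sketch of the pattern this commit describes (not the driver's actual code; the names my_dss, my_dss_probe, my_dss_irq_handler and my_dss_remove are illustrative only), the probe path asks the platform device for its IRQ via platform_get_irq() instead of hard-coding it, remembers the number, and releases it on the exit path:

#include <linux/platform_device.h>
#include <linux/interrupt.h>

static struct {
	int irq;	/* IRQ number looked up at probe time */
} my_dss;

static irqreturn_t my_dss_irq_handler(int irq, void *arg)
{
	/* acknowledge and dispatch the interrupt here */
	return IRQ_HANDLED;
}

static int my_dss_probe(struct platform_device *pdev)
{
	int irq, r;

	/* first IRQ resource of the device, provided by the hwmod data */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	r = request_irq(irq, my_dss_irq_handler, 0, "MY DSS", NULL);
	if (r < 0)
		return r;

	my_dss.irq = irq;	/* kept so the remove path can free it */
	return 0;
}

static int my_dss_remove(struct platform_device *pdev)
{
	free_irq(my_dss.irq, NULL);
	return 0;
}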
1038 lines · 21 KiB · C
/*
 * linux/drivers/video/omap2/dss/dss.c
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * Some code and ideas taken from drivers/video/omap/ driver
 * by Imre Deak.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "DSS"

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/clk.h>

#include <plat/display.h>
#include <plat/clock.h>
#include "dss.h"

#define DSS_SZ_REGS SZ_512

struct dss_reg {
	u16 idx;
};

#define DSS_REG(idx) ((const struct dss_reg) { idx })

#define DSS_REVISION DSS_REG(0x0000)
#define DSS_SYSCONFIG DSS_REG(0x0010)
#define DSS_SYSSTATUS DSS_REG(0x0014)
#define DSS_IRQSTATUS DSS_REG(0x0018)
#define DSS_CONTROL DSS_REG(0x0040)
#define DSS_SDI_CONTROL DSS_REG(0x0044)
#define DSS_PLL_CONTROL DSS_REG(0x0048)
#define DSS_SDI_STATUS DSS_REG(0x005C)

#define REG_GET(idx, start, end) \
	FLD_GET(dss_read_reg(idx), start, end)

#define REG_FLD_MOD(idx, val, start, end) \
	dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))

static struct {
	struct platform_device *pdev;
	void __iomem *base;
	int ctx_id;

	struct clk *dpll4_m4_ck;
	struct clk *dss_ick;
	struct clk *dss1_fck;
	struct clk *dss2_fck;
	struct clk *dss_54m_fck;
	struct clk *dss_96m_fck;
	unsigned num_clks_enabled;

	unsigned long cache_req_pck;
	unsigned long cache_prate;
	struct dss_clock_info cache_dss_cinfo;
	struct dispc_clock_info cache_dispc_cinfo;

	enum dss_clk_source dsi_clk_source;
	enum dss_clk_source dispc_clk_source;

	u32 ctx[DSS_SZ_REGS / sizeof(u32)];
	int dss_irq;
} dss;

static void dss_clk_enable_all_no_ctx(void);
static void dss_clk_disable_all_no_ctx(void);
static void dss_clk_enable_no_ctx(enum dss_clock clks);
static void dss_clk_disable_no_ctx(enum dss_clock clks);

static int _omap_dss_wait_reset(void);

static inline void dss_write_reg(const struct dss_reg idx, u32 val)
{
	__raw_writel(val, dss.base + idx.idx);
}

static inline u32 dss_read_reg(const struct dss_reg idx)
{
	return __raw_readl(dss.base + idx.idx);
}

#define SR(reg) \
	dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg)
#define RR(reg) \
	dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])

void dss_save_context(void)
{
	if (cpu_is_omap24xx())
		return;

	SR(SYSCONFIG);
	SR(CONTROL);

#ifdef CONFIG_OMAP2_DSS_SDI
	SR(SDI_CONTROL);
	SR(PLL_CONTROL);
#endif
}

void dss_restore_context(void)
{
	if (_omap_dss_wait_reset())
		DSSERR("DSS not coming out of reset after sleep\n");

	RR(SYSCONFIG);
	RR(CONTROL);

#ifdef CONFIG_OMAP2_DSS_SDI
	RR(SDI_CONTROL);
	RR(PLL_CONTROL);
#endif
}

#undef SR
#undef RR

void dss_sdi_init(u8 datapairs)
{
	u32 l;

	BUG_ON(datapairs > 3 || datapairs < 1);

	l = dss_read_reg(DSS_SDI_CONTROL);
	l = FLD_MOD(l, 0xf, 19, 15);		/* SDI_PDIV */
	l = FLD_MOD(l, datapairs-1, 3, 2);	/* SDI_PRSEL */
	l = FLD_MOD(l, 2, 1, 0);		/* SDI_BWSEL */
	dss_write_reg(DSS_SDI_CONTROL, l);

	l = dss_read_reg(DSS_PLL_CONTROL);
	l = FLD_MOD(l, 0x7, 25, 22);	/* SDI_PLL_FREQSEL */
	l = FLD_MOD(l, 0xb, 16, 11);	/* SDI_PLL_REGN */
	l = FLD_MOD(l, 0xb4, 10, 1);	/* SDI_PLL_REGM */
	dss_write_reg(DSS_PLL_CONTROL, l);
}

int dss_sdi_enable(void)
{
	unsigned long timeout;

	dispc_pck_free_enable(1);

	/* Reset SDI PLL */
	REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
	udelay(1);	/* wait 2x PCLK */

	/* Lock SDI PLL */
	REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */

	/* Waiting for PLL lock request to complete */
	timeout = jiffies + msecs_to_jiffies(500);
	while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) {
		if (time_after_eq(jiffies, timeout)) {
			DSSERR("PLL lock request timed out\n");
			goto err1;
		}
	}

	/* Clearing PLL_GO bit */
	REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28);

	/* Waiting for PLL to lock */
	timeout = jiffies + msecs_to_jiffies(500);
	while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) {
		if (time_after_eq(jiffies, timeout)) {
			DSSERR("PLL lock timed out\n");
			goto err1;
		}
	}

	dispc_lcd_enable_signal(1);

	/* Waiting for SDI reset to complete */
	timeout = jiffies + msecs_to_jiffies(500);
	while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) {
		if (time_after_eq(jiffies, timeout)) {
			DSSERR("SDI reset timed out\n");
			goto err2;
		}
	}

	return 0;

err2:
	dispc_lcd_enable_signal(0);
err1:
	/* Reset SDI PLL */
	REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */

	dispc_pck_free_enable(0);

	return -ETIMEDOUT;
}

void dss_sdi_disable(void)
{
	dispc_lcd_enable_signal(0);

	dispc_pck_free_enable(0);

	/* Reset SDI PLL */
	REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
}

void dss_dump_clocks(struct seq_file *s)
{
	unsigned long dpll4_ck_rate;
	unsigned long dpll4_m4_ck_rate;

	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);

	dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
	dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);

	seq_printf(s, "- DSS -\n");

	seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);

	if (cpu_is_omap3630())
		seq_printf(s, "dss1_alwon_fclk = %lu / %lu = %lu\n",
				dpll4_ck_rate,
				dpll4_ck_rate / dpll4_m4_ck_rate,
				dss_clk_get_rate(DSS_CLK_FCK1));
	else
		seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
				dpll4_ck_rate,
				dpll4_ck_rate / dpll4_m4_ck_rate,
				dss_clk_get_rate(DSS_CLK_FCK1));

	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
}

void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))

	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);

	DUMPREG(DSS_REVISION);
	DUMPREG(DSS_SYSCONFIG);
	DUMPREG(DSS_SYSSTATUS);
	DUMPREG(DSS_IRQSTATUS);
	DUMPREG(DSS_CONTROL);
	DUMPREG(DSS_SDI_CONTROL);
	DUMPREG(DSS_PLL_CONTROL);
	DUMPREG(DSS_SDI_STATUS);

	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
#undef DUMPREG
}

void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
{
	int b;

	BUG_ON(clk_src != DSS_SRC_DSI1_PLL_FCLK &&
			clk_src != DSS_SRC_DSS1_ALWON_FCLK);

	b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;

	if (clk_src == DSS_SRC_DSI1_PLL_FCLK)
		dsi_wait_dsi1_pll_active();

	REG_FLD_MOD(DSS_CONTROL, b, 0, 0);	/* DISPC_CLK_SWITCH */

	dss.dispc_clk_source = clk_src;
}

void dss_select_dsi_clk_source(enum dss_clk_source clk_src)
{
	int b;

	BUG_ON(clk_src != DSS_SRC_DSI2_PLL_FCLK &&
			clk_src != DSS_SRC_DSS1_ALWON_FCLK);

	b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;

	if (clk_src == DSS_SRC_DSI2_PLL_FCLK)
		dsi_wait_dsi2_pll_active();

	REG_FLD_MOD(DSS_CONTROL, b, 1, 1);	/* DSI_CLK_SWITCH */

	dss.dsi_clk_source = clk_src;
}

enum dss_clk_source dss_get_dispc_clk_source(void)
{
	return dss.dispc_clk_source;
}

enum dss_clk_source dss_get_dsi_clk_source(void)
{
	return dss.dsi_clk_source;
}

/* calculate clock rates using dividers in cinfo */
int dss_calc_clock_rates(struct dss_clock_info *cinfo)
{
	unsigned long prate;

	if (cinfo->fck_div > (cpu_is_omap3630() ? 32 : 16) ||
			cinfo->fck_div == 0)
		return -EINVAL;

	prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));

	cinfo->fck = prate / cinfo->fck_div;

	return 0;
}

int dss_set_clock_div(struct dss_clock_info *cinfo)
{
	unsigned long prate;
	int r;

	if (cpu_is_omap34xx()) {
		prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
		DSSDBG("dpll4_m4 = %ld\n", prate);

		r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
		if (r)
			return r;
	}

	DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);

	return 0;
}

int dss_get_clock_div(struct dss_clock_info *cinfo)
{
	cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK1);

	if (cpu_is_omap34xx()) {
		unsigned long prate;
		prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
		if (cpu_is_omap3630())
			cinfo->fck_div = prate / (cinfo->fck);
		else
			cinfo->fck_div = prate / (cinfo->fck / 2);
	} else {
		cinfo->fck_div = 0;
	}

	return 0;
}

unsigned long dss_get_dpll4_rate(void)
{
	if (cpu_is_omap34xx())
		return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
	else
		return 0;
}

int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
		struct dss_clock_info *dss_cinfo,
		struct dispc_clock_info *dispc_cinfo)
{
	unsigned long prate;
	struct dss_clock_info best_dss;
	struct dispc_clock_info best_dispc;

	unsigned long fck;

	u16 fck_div;

	int match = 0;
	int min_fck_per_pck;

	prate = dss_get_dpll4_rate();

	fck = dss_clk_get_rate(DSS_CLK_FCK1);
	if (req_pck == dss.cache_req_pck &&
			((cpu_is_omap34xx() && prate == dss.cache_prate) ||
			 dss.cache_dss_cinfo.fck == fck)) {
		DSSDBG("dispc clock info found from cache.\n");
		*dss_cinfo = dss.cache_dss_cinfo;
		*dispc_cinfo = dss.cache_dispc_cinfo;
		return 0;
	}

	min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;

	if (min_fck_per_pck &&
		req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
		DSSERR("Requested pixel clock not possible with the current "
				"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
				"the constraint off.\n");
		min_fck_per_pck = 0;
	}

retry:
	memset(&best_dss, 0, sizeof(best_dss));
	memset(&best_dispc, 0, sizeof(best_dispc));

	if (cpu_is_omap24xx()) {
		struct dispc_clock_info cur_dispc;
		/* XXX can we change the clock on omap2? */
		fck = dss_clk_get_rate(DSS_CLK_FCK1);
		fck_div = 1;

		dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
		match = 1;

		best_dss.fck = fck;
		best_dss.fck_div = fck_div;

		best_dispc = cur_dispc;

		goto found;
	} else if (cpu_is_omap34xx()) {
		for (fck_div = (cpu_is_omap3630() ? 32 : 16);
					fck_div > 0; --fck_div) {
			struct dispc_clock_info cur_dispc;

			if (cpu_is_omap3630())
				fck = prate / fck_div;
			else
				fck = prate / fck_div * 2;

			if (fck > DISPC_MAX_FCK)
				continue;

			if (min_fck_per_pck &&
					fck < req_pck * min_fck_per_pck)
				continue;

			match = 1;

			dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);

			if (abs(cur_dispc.pck - req_pck) <
					abs(best_dispc.pck - req_pck)) {

				best_dss.fck = fck;
				best_dss.fck_div = fck_div;

				best_dispc = cur_dispc;

				if (cur_dispc.pck == req_pck)
					goto found;
			}
		}
	} else {
		BUG();
	}

found:
	if (!match) {
		if (min_fck_per_pck) {
			DSSERR("Could not find suitable clock settings.\n"
					"Turning FCK/PCK constraint off and"
					"trying again.\n");
			min_fck_per_pck = 0;
			goto retry;
		}

		DSSERR("Could not find suitable clock settings.\n");

		return -EINVAL;
	}

	if (dss_cinfo)
		*dss_cinfo = best_dss;
	if (dispc_cinfo)
		*dispc_cinfo = best_dispc;

	dss.cache_req_pck = req_pck;
	dss.cache_prate = prate;
	dss.cache_dss_cinfo = best_dss;
	dss.cache_dispc_cinfo = best_dispc;

	return 0;
}



static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
{
	dispc_irq_handler();

	return IRQ_HANDLED;
}

static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
{
	u32 irqstatus;

	irqstatus = dss_read_reg(DSS_IRQSTATUS);

	if (irqstatus & (1<<0))	/* DISPC_IRQ */
		dispc_irq_handler();
#ifdef CONFIG_OMAP2_DSS_DSI
	if (irqstatus & (1<<1))	/* DSI_IRQ */
		dsi_irq_handler();
#endif

	return IRQ_HANDLED;
}

static int _omap_dss_wait_reset(void)
{
	int t = 0;

	while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
		if (++t > 1000) {
			DSSERR("soft reset failed\n");
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}

static int _omap_dss_reset(void)
{
	/* Soft reset */
	REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
	return _omap_dss_wait_reset();
}

void dss_set_venc_output(enum omap_dss_venc_type type)
{
	int l = 0;

	if (type == OMAP_DSS_VENC_TYPE_COMPOSITE)
		l = 0;
	else if (type == OMAP_DSS_VENC_TYPE_SVIDEO)
		l = 1;
	else
		BUG();

	/* venc out selection. 0 = comp, 1 = svideo */
	REG_FLD_MOD(DSS_CONTROL, l, 6, 6);
}

void dss_set_dac_pwrdn_bgz(bool enable)
{
	REG_FLD_MOD(DSS_CONTROL, enable, 5, 5);	/* DAC Power-Down Control */
}

static int dss_init(bool skip_init)
{
	int r;
	u32 rev;
	struct resource *dss_mem;

	dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
	if (!dss_mem) {
		DSSERR("can't get IORESOURCE_MEM DSS\n");
		r = -EINVAL;
		goto fail0;
	}
	dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
	if (!dss.base) {
		DSSERR("can't ioremap DSS\n");
		r = -ENOMEM;
		goto fail0;
	}

	if (!skip_init) {
		/* disable LCD and DIGIT output. This seems to fix the synclost
		 * problem that we get, if the bootloader starts the DSS and
		 * the kernel resets it */
		omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);

		/* We need to wait here a bit, otherwise we sometimes start to
		 * get synclost errors, and after that only power cycle will
		 * restore DSS functionality. I have no idea why this happens.
		 * And we have to wait _before_ resetting the DSS, but after
		 * enabling clocks.
		 */
		msleep(50);

		_omap_dss_reset();
	}

	/* autoidle */
	REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);

	/* Select DPLL */
	REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);

#ifdef CONFIG_OMAP2_DSS_VENC
	REG_FLD_MOD(DSS_CONTROL, 1, 4, 4);	/* venc dac demen */
	REG_FLD_MOD(DSS_CONTROL, 1, 3, 3);	/* venc clock 4x enable */
	REG_FLD_MOD(DSS_CONTROL, 0, 2, 2);	/* venc clock mode = normal */
#endif

	dss.dss_irq = platform_get_irq(dss.pdev, 0);
	if (dss.dss_irq < 0) {
		DSSERR("omap2 dss: platform_get_irq failed\n");
		r = -ENODEV;
		goto fail1;
	}

	r = request_irq(dss.dss_irq,
			cpu_is_omap24xx()
			? dss_irq_handler_omap2
			: dss_irq_handler_omap3,
			0, "OMAP DSS", NULL);

	if (r < 0) {
		DSSERR("omap2 dss: request_irq failed\n");
		goto fail1;
	}

	if (cpu_is_omap34xx()) {
		dss.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
		if (IS_ERR(dss.dpll4_m4_ck)) {
			DSSERR("Failed to get dpll4_m4_ck\n");
			r = PTR_ERR(dss.dpll4_m4_ck);
			goto fail2;
		}
	}

	dss.dsi_clk_source = DSS_SRC_DSS1_ALWON_FCLK;
	dss.dispc_clk_source = DSS_SRC_DSS1_ALWON_FCLK;

	dss_save_context();

	rev = dss_read_reg(DSS_REVISION);
	printk(KERN_INFO "OMAP DSS rev %d.%d\n",
			FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));

	return 0;

fail2:
	free_irq(dss.dss_irq, NULL);
fail1:
	iounmap(dss.base);
fail0:
	return r;
}

static void dss_exit(void)
{
	if (cpu_is_omap34xx())
		clk_put(dss.dpll4_m4_ck);

	free_irq(dss.dss_irq, NULL);

	iounmap(dss.base);
}

/* CONTEXT */
static int dss_get_ctx_id(void)
{
	struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
	int r;

	if (!pdata->board_data->get_last_off_on_transaction_id)
		return 0;
	r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev);
	if (r < 0) {
		dev_err(&dss.pdev->dev, "getting transaction ID failed, "
				"will force context restore\n");
		r = -1;
	}
	return r;
}

int dss_need_ctx_restore(void)
{
	int id = dss_get_ctx_id();

	if (id < 0 || id != dss.ctx_id) {
		DSSDBG("ctx id %d -> id %d\n",
				dss.ctx_id, id);
		dss.ctx_id = id;
		return 1;
	} else {
		return 0;
	}
}

static void save_all_ctx(void)
{
	DSSDBG("save context\n");

	dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);

	dss_save_context();
	dispc_save_context();
#ifdef CONFIG_OMAP2_DSS_DSI
	dsi_save_context();
#endif

	dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
}

static void restore_all_ctx(void)
{
	DSSDBG("restore context\n");

	dss_clk_enable_all_no_ctx();

	dss_restore_context();
	dispc_restore_context();
#ifdef CONFIG_OMAP2_DSS_DSI
	dsi_restore_context();
#endif

	dss_clk_disable_all_no_ctx();
}

static int dss_get_clock(struct clk **clock, const char *clk_name)
{
	struct clk *clk;

	clk = clk_get(&dss.pdev->dev, clk_name);

	if (IS_ERR(clk)) {
		DSSERR("can't get clock %s", clk_name);
		return PTR_ERR(clk);
	}

	*clock = clk;

	DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));

	return 0;
}

static int dss_get_clocks(void)
{
	int r;

	dss.dss_ick = NULL;
	dss.dss1_fck = NULL;
	dss.dss2_fck = NULL;
	dss.dss_54m_fck = NULL;
	dss.dss_96m_fck = NULL;

	r = dss_get_clock(&dss.dss_ick, "ick");
	if (r)
		goto err;

	r = dss_get_clock(&dss.dss1_fck, "dss1_fck");
	if (r)
		goto err;

	r = dss_get_clock(&dss.dss2_fck, "dss2_fck");
	if (r)
		goto err;

	r = dss_get_clock(&dss.dss_54m_fck, "tv_fck");
	if (r)
		goto err;

	r = dss_get_clock(&dss.dss_96m_fck, "video_fck");
	if (r)
		goto err;

	return 0;

err:
	if (dss.dss_ick)
		clk_put(dss.dss_ick);
	if (dss.dss1_fck)
		clk_put(dss.dss1_fck);
	if (dss.dss2_fck)
		clk_put(dss.dss2_fck);
	if (dss.dss_54m_fck)
		clk_put(dss.dss_54m_fck);
	if (dss.dss_96m_fck)
		clk_put(dss.dss_96m_fck);

	return r;
}

static void dss_put_clocks(void)
{
	if (dss.dss_96m_fck)
		clk_put(dss.dss_96m_fck);
	clk_put(dss.dss_54m_fck);
	clk_put(dss.dss1_fck);
	clk_put(dss.dss2_fck);
	clk_put(dss.dss_ick);
}

unsigned long dss_clk_get_rate(enum dss_clock clk)
{
	switch (clk) {
	case DSS_CLK_ICK:
		return clk_get_rate(dss.dss_ick);
	case DSS_CLK_FCK1:
		return clk_get_rate(dss.dss1_fck);
	case DSS_CLK_FCK2:
		return clk_get_rate(dss.dss2_fck);
	case DSS_CLK_54M:
		return clk_get_rate(dss.dss_54m_fck);
	case DSS_CLK_96M:
		return clk_get_rate(dss.dss_96m_fck);
	}

	BUG();
	return 0;
}

static unsigned count_clk_bits(enum dss_clock clks)
{
	unsigned num_clks = 0;

	if (clks & DSS_CLK_ICK)
		++num_clks;
	if (clks & DSS_CLK_FCK1)
		++num_clks;
	if (clks & DSS_CLK_FCK2)
		++num_clks;
	if (clks & DSS_CLK_54M)
		++num_clks;
	if (clks & DSS_CLK_96M)
		++num_clks;

	return num_clks;
}

static void dss_clk_enable_no_ctx(enum dss_clock clks)
{
	unsigned num_clks = count_clk_bits(clks);

	if (clks & DSS_CLK_ICK)
		clk_enable(dss.dss_ick);
	if (clks & DSS_CLK_FCK1)
		clk_enable(dss.dss1_fck);
	if (clks & DSS_CLK_FCK2)
		clk_enable(dss.dss2_fck);
	if (clks & DSS_CLK_54M)
		clk_enable(dss.dss_54m_fck);
	if (clks & DSS_CLK_96M)
		clk_enable(dss.dss_96m_fck);

	dss.num_clks_enabled += num_clks;
}

void dss_clk_enable(enum dss_clock clks)
{
	bool check_ctx = dss.num_clks_enabled == 0;

	dss_clk_enable_no_ctx(clks);

	if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
		restore_all_ctx();
}

static void dss_clk_disable_no_ctx(enum dss_clock clks)
{
	unsigned num_clks = count_clk_bits(clks);

	if (clks & DSS_CLK_ICK)
		clk_disable(dss.dss_ick);
	if (clks & DSS_CLK_FCK1)
		clk_disable(dss.dss1_fck);
	if (clks & DSS_CLK_FCK2)
		clk_disable(dss.dss2_fck);
	if (clks & DSS_CLK_54M)
		clk_disable(dss.dss_54m_fck);
	if (clks & DSS_CLK_96M)
		clk_disable(dss.dss_96m_fck);

	dss.num_clks_enabled -= num_clks;
}

void dss_clk_disable(enum dss_clock clks)
{
	if (cpu_is_omap34xx()) {
		unsigned num_clks = count_clk_bits(clks);

		BUG_ON(dss.num_clks_enabled < num_clks);

		if (dss.num_clks_enabled == num_clks)
			save_all_ctx();
	}

	dss_clk_disable_no_ctx(clks);
}

static void dss_clk_enable_all_no_ctx(void)
{
	enum dss_clock clks;

	clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
	if (cpu_is_omap34xx())
		clks |= DSS_CLK_96M;
	dss_clk_enable_no_ctx(clks);
}

static void dss_clk_disable_all_no_ctx(void)
{
	enum dss_clock clks;

	clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
	if (cpu_is_omap34xx())
		clks |= DSS_CLK_96M;
	dss_clk_disable_no_ctx(clks);
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
/* CLOCKS */
static void core_dump_clocks(struct seq_file *s)
{
	int i;
	struct clk *clocks[5] = {
		dss.dss_ick,
		dss.dss1_fck,
		dss.dss2_fck,
		dss.dss_54m_fck,
		dss.dss_96m_fck
	};

	seq_printf(s, "- CORE -\n");

	seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);

	for (i = 0; i < 5; i++) {
		if (!clocks[i])
			continue;
		seq_printf(s, "%-15s\t%lu\t%d\n",
				clocks[i]->name,
				clk_get_rate(clocks[i]),
				clocks[i]->usecount);
	}
}
#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */

/* DEBUGFS */
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
void dss_debug_dump_clocks(struct seq_file *s)
{
	core_dump_clocks(s);
	dss_dump_clocks(s);
	dispc_dump_clocks(s);
#ifdef CONFIG_OMAP2_DSS_DSI
	dsi_dump_clocks(s);
#endif
}
#endif


/* DSS HW IP initialisation */
static int omap_dsshw_probe(struct platform_device *pdev)
{
	int r;
	int skip_init = 0;

	dss.pdev = pdev;

	r = dss_get_clocks();
	if (r)
		goto err_clocks;

	dss_clk_enable_all_no_ctx();

	dss.ctx_id = dss_get_ctx_id();
	DSSDBG("initial ctx id %u\n", dss.ctx_id);

#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
	/* DISPC_CONTROL */
	if (omap_readl(0x48050440) & 1)	/* LCD enabled? */
		skip_init = 1;
#endif

	r = dss_init(skip_init);
	if (r) {
		DSSERR("Failed to initialize DSS\n");
		goto err_dss;
	}

	dss_clk_disable_all_no_ctx();
	return 0;

err_dss:
	dss_clk_disable_all_no_ctx();
	dss_put_clocks();
err_clocks:
	return r;
}

static int omap_dsshw_remove(struct platform_device *pdev)
{

	dss_exit();

	/*
	 * As part of hwmod changes, DSS is not the only controller of dss
	 * clocks; hwmod framework itself will also enable clocks during hwmod
	 * init for dss, and autoidle is set in h/w for DSS. Hence, there's no
	 * need to disable clocks if their usecounts > 1.
	 */
	WARN_ON(dss.num_clks_enabled > 0);

	dss_put_clocks();
	return 0;
}

static struct platform_driver omap_dsshw_driver = {
	.probe = omap_dsshw_probe,
	.remove = omap_dsshw_remove,
	.driver = {
		.name = "omapdss_dss",
		.owner = THIS_MODULE,
	},
};

int dss_init_platform_driver(void)
{
	return platform_driver_register(&omap_dsshw_driver);
}

void dss_uninit_platform_driver(void)
{
	return platform_driver_unregister(&omap_dsshw_driver);
}
|