Merge tag 'drm-msm-next-2018-10-07' of git://people.freedesktop.org/~robclark/linux into drm-next

This time mostly further refinement of dpu1+a6xx for sdm845 and
beyond.. and hurray for more negative diffstat :-)

- Misc cleanups and fixes
- GPU preemption optimization
- a6xx perf improvements and clock fixes (i.e. let's actually not run at
  minimum clks)
- a6xx devfreq/DCVS
- Lots of code cleanup across dpu (Bruce, Jeykumar, Sean)
- Fixed a few crashes on startup relating to dsi (Sean)
- Add cursor support (Sravanthi, Sean)
- Properly free mdss irq on destroy (Jordan)
- Use correct encoder_type when initializing, fixes crash on boot (Stephen)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGsNevCzMiLuNW1EVN6gtP3JZSir6PfnWvnCavSZM+bUFQ@mail.gmail.com
Committed by Dave Airlie on 2018-10-08 16:45:56 +10:00
commit d995052cad
60 changed files with 1908 additions and 3245 deletions


@ -58,7 +58,6 @@ msm-y := \
disp/dpu1/dpu_formats.o \
disp/dpu1/dpu_hw_blk.o \
disp/dpu1/dpu_hw_catalog.o \
disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
disp/dpu1/dpu_hw_interrupts.o \
disp/dpu1/dpu_hw_intf.o \


@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:


@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:


@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:


@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:


@ -132,14 +132,14 @@ reset_set(void *data, u64 val)
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_unreference(a5xx_gpu->pm4_bo);
drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
if (a5xx_gpu->pfp_iova)
msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_unreference(a5xx_gpu->pfp_bo);
drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
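
(Aside: drm_gem_object_unreference() in this hunk and the similar ones below is the legacy name; the kernel has since standardized on drm_gem_object_put() and drm_gem_object_put_unlocked(), which is what this series converts to. The reference-drop semantics are unchanged.)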


@ -1234,7 +1234,7 @@ static void a5xx_crashdumper_free(struct msm_gpu *gpu,
msm_gem_put_iova(dumper->bo, gpu->aspace);
msm_gem_put_vaddr(dumper->bo);
drm_gem_object_unreference(dumper->bo);
drm_gem_object_put(dumper->bo);
}
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@ -1436,12 +1436,22 @@ static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
return a5xx_gpu->cur_ring;
}
static int a5xx_gpu_busy(struct msm_gpu *gpu, uint64_t *value)
static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
{
*value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
u64 busy_cycles, busy_time;
return 0;
busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
busy_time = (busy_cycles - gpu->devfreq.busy_cycles);
do_div(busy_time, (clk_get_rate(gpu->core_clk) / 1000000));
gpu->devfreq.busy_cycles = busy_cycles;
if (WARN_ON(busy_time > ~0LU))
return ~0LU;
return (unsigned long)busy_time;
}
static const struct adreno_gpu_funcs funcs = {
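
The new a5xx_gpu_busy() above feeds devfreq by turning a cycle-counter delta into busy time. A minimal standalone sketch of that arithmetic, with an assumed (illustrative) core clock rate — not the kernel code itself:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t prev_cycles = 1000000000ull;  /* counter at last sample */
    uint64_t busy_cycles = 1000600000ull;  /* counter now */
    uint64_t clk_hz      = 600000000ull;   /* assumed core clock rate */

    /* cycles elapsed / (clock in MHz) = microseconds spent busy */
    uint64_t busy_time = (busy_cycles - prev_cycles) / (clk_hz / 1000000);

    printf("busy for %llu us\n", (unsigned long long)busy_time);  /* 1000 */
    return 0;
}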


@ -323,7 +323,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
if (a5xx_gpu->gpmu_bo)
drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
drm_gem_object_put(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_bo = NULL;
a5xx_gpu->gpmu_iova = 0;


@ -208,6 +208,13 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
/* Always come up on rb 0 */
a5xx_gpu->cur_ring = gpu->rb[0];
/* No preemption if we only have one ring */
if (gpu->nr_rings == 1)
return;
for (i = 0; i < gpu->nr_rings; i++) {
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
@ -220,9 +227,6 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
/* Reset the preemption state */
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
/* Always come up on rb 0 */
a5xx_gpu->cur_ring = gpu->rb[0];
}
static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
@ -272,7 +276,7 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
if (a5xx_gpu->preempt_iova[i])
msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
drm_gem_object_unreference(a5xx_gpu->preempt_bo[i]);
drm_gem_object_put(a5xx_gpu->preempt_bo[i]);
a5xx_gpu->preempt_bo[i] = NULL;
}
}
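
The reordering above is subtle but deliberate: the cur_ring assignment now happens before the nr_rings == 1 early return, so a GPU running without preemption still comes up with a valid current ring. A toy sketch of the fixed control flow (illustrative, not the kernel source):

#include <stdio.h>

static int cur_ring = -1;

static void preempt_hw_init_sketch(int nr_rings)
{
    cur_ring = 0;        /* always come up on ring 0, set unconditionally */

    if (nr_rings == 1)
        return;          /* nothing else to do without preemption */

    /* ... per-ring preemption records would be reset here ... */
}

int main(void)
{
    preempt_hw_init_sketch(1);
    printf("cur_ring = %d\n", cur_ring);  /* 0, not the stale -1 */
    return 0;
}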

File diff suppressed because it is too large.


@ -2,7 +2,6 @@
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
@ -42,9 +41,6 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
tasklet_schedule(&gmu->hfi_tasklet);
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
@ -65,12 +61,14 @@ static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
int ret;
gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
((index << 24) & 0xff) | (3 & 0xf));
((3 & 0xf) << 28) | index);
/*
* Send an invalid index as a vote for the bus bandwidth and let the
@ -82,7 +80,37 @@ static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
if (ret)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
gmu->freq = gmu->gpu_freqs[index];
}
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 perf_index = 0;
if (freq == gmu->freq)
return;
for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
if (freq == gmu->gpu_freqs[perf_index])
break;
__a6xx_gmu_set_freq(gmu, perf_index);
}
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
return gmu->freq;
}
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
@ -135,9 +163,6 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
u32 val;
int ret;
gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
@ -348,8 +373,23 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
return msm_writel(value, ptr + (offset << 2));
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
const char *name);
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
if (!pdcptr || !seqptr)
goto err;
/* Disable SDE clock gating */
gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
@ -374,44 +414,48 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
/* Load PDC sequencer uCode for power up and power down sequence */
pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
/* Set TCS commands used by PDC sequence for low power modes */
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
/* ensure no writes happen before the uCode is fully written */
wmb();
err:
devm_iounmap(gmu->dev, pdcptr);
devm_iounmap(gmu->dev, seqptr);
}
/*
@ -547,8 +591,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
}
#define A6XX_HFI_IRQ_MASK \
(A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
#define A6XX_GMU_IRQ_MASK \
(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
@ -626,7 +669,7 @@ int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
/* Set the GPU back to the highest power frequency */
a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
out:
if (ret)
@ -665,7 +708,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
ret = a6xx_hfi_start(gmu, status);
/* Set the GPU to the highest power frequency */
a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
out:
/* Make sure to turn off the boot OOB request on error */
@ -1140,7 +1183,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, false);
of_dma_configure(gmu->dev, node, true);
/* Fow now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@ -1170,11 +1213,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
/* Map the GPU power domain controller registers */
gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
if (IS_ERR(gmu->mmio))
goto err;
/* Get the HFI and GMU interrupts */
@ -1184,9 +1223,6 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
goto err;
/* Set up a tasklet to handle GMU HFI responses */
tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
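
Two things are happening in the GMU diff above. First, the DCVS_PERF_SETTING write is fixed: the old expression ((index << 24) & 0xff) masks the shifted index down to zero, so every vote effectively requested index 0 (the minimum clock) — this is the "let's actually not run at minimum clks" item from the changelog. The new expression puts the vote mode in the top nibble and the real index in the low bits. Second, the newly exported a6xx_gmu_set_freq() maps a requested rate to a table index before voting. A standalone sketch of that lookup, with illustrative frequencies (the real table comes from the OPPs):

#include <stdio.h>

int main(void)
{
    /* illustrative OPP table, not real sdm845 values */
    unsigned long gpu_freqs[] = { 257000000, 342000000, 414000000, 710000000 };
    int nr_gpu_freqs = 4, perf_index;
    unsigned long freq = 414000000;  /* rate requested by devfreq */

    for (perf_index = 0; perf_index < nr_gpu_freqs - 1; perf_index++)
        if (freq == gpu_freqs[perf_index])
            break;

    /* the GMU is voted the index, not the rate */
    printf("vote perf index %d for %lu Hz\n", perf_index, freq);
    return 0;
}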


@ -4,6 +4,7 @@
#ifndef _A6XX_GMU_H_
#define _A6XX_GMU_H_
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include "msm_drv.h"
#include "a6xx_hfi.h"
@ -47,7 +48,6 @@ struct a6xx_gmu {
struct device *dev;
void * __iomem mmio;
void * __iomem pdc_mmio;
int hfi_irq;
int gmu_irq;
@ -74,6 +74,8 @@ struct a6xx_gmu {
unsigned long gmu_freqs[4];
u32 cx_arc_votes[4];
unsigned long freq;
struct a6xx_hfi_queue queues[2];
struct tasklet_struct hfi_tasklet;
@ -89,11 +91,6 @@ static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
return msm_writel(value, gmu->mmio + (offset << 2));
}
static inline void pdc_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
return msm_writel(value, gmu->pdc_mmio + (offset << 2));
}
static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
u32 val = gmu_read(gmu, reg);
@ -103,6 +100,16 @@ static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
gmu_write(gmu, reg, val | or);
}
static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
{
u64 val;
val = (u64) msm_readl(gmu->mmio + (lo << 2));
val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);
return val;
}
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
@ -157,6 +164,4 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
void a6xx_hfi_task(unsigned long data);
#endif
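
The new gmu_read64() helper added above composes one 64-bit counter value from two 32-bit register reads, low word first. The same composition in a standalone sketch (the register reads are stubbed stand-ins for msm_readl()):

#include <stdint.h>
#include <stdio.h>

static uint32_t reg_read(int hi)  /* stand-in for msm_readl() */
{
    return hi ? 0x00000001u : 0xdeadbeefu;
}

int main(void)
{
    uint64_t val;

    val  = (uint64_t)reg_read(0);         /* low 32 bits */
    val |= (uint64_t)reg_read(1) << 32;   /* high 32 bits */

    printf("0x%llx\n", (unsigned long long)val);  /* 0x1deadbeef */
    return 0;
}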


@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@ -167,8 +167,8 @@ static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_
#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000004
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000008
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000004
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000008
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040


@ -7,6 +7,8 @@
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include <linux/devfreq.h>
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@ -438,10 +440,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
A6XX_PROTECT_RDONLY(0x8d0, 0x23));
gpu_write(gpu, REG_A6XX_CP_PROTECT(25),
A6XX_PROTECT_RDONLY(0x980, 0x4));
gpu_write(gpu, REG_A6XX_CP_PROTECT(26), A6XX_PROTECT_RW(0xa630, 0x0));
gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
/* Enable interrupts */
gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
@ -682,6 +682,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
gpu->needs_hw_init = true;
msm_gpu_resume_devfreq(gpu);
return ret;
}
@ -690,6 +692,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
devfreq_suspend_device(gpu->devfreq.devfreq);
/*
* Make sure the GMU is idle before continuing (because some transitions
* may use VBIF
@ -744,7 +748,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
if (a6xx_gpu->sqe_bo) {
if (a6xx_gpu->sqe_iova)
msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(a6xx_gpu->sqe_bo);
drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
}
a6xx_gmu_remove(a6xx_gpu);
@ -753,6 +757,24 @@ static void a6xx_destroy(struct msm_gpu *gpu)
kfree(a6xx_gpu);
}
static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
u64 busy_cycles;
unsigned long busy_time;
busy_cycles = gmu_read64(&a6xx_gpu->gmu,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
busy_time = ((busy_cycles - gpu->devfreq.busy_cycles) * 10) / 192;
gpu->devfreq.busy_cycles = busy_cycles;
return busy_time;
}
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@ -768,6 +790,9 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = a6xx_show,
#endif
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
.gpu_set_freq = a6xx_gmu_set_freq,
},
.get_timestamp = a6xx_get_timestamp,
};
@ -799,7 +824,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
}
/* Check if there is a GMU phandle and set it up */
node = of_parse_phandle(pdev->dev.of_node, "gmu", 0);
node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
/* FIXME: How do we gracefully handle this? */
BUG_ON(!node);
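
One more note on the a6xx_gpu_busy() hunk above: it scales the GMU power-counter delta by 10/192, and 10/192 is 1/19.2, which converts counter ticks into microseconds on the assumption that XOCLK is the usual 19.2 MHz crystal reference. Worked through in a standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t prev = 0;
    uint64_t now  = 19200000;  /* one second's worth of 19.2 MHz ticks */

    /* x * 10 / 192 == x / 19.2: ticks -> microseconds */
    uint64_t busy_us = ((now - prev) * 10) / 192;

    printf("%llu us\n", (unsigned long long)busy_us);  /* 1000000 */
    return 0;
}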


@ -56,5 +56,6 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
#endif /* __A6XX_GPU_H__ */


@ -79,83 +79,72 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return 0;
}
struct a6xx_hfi_response {
u32 id;
u32 seqnum;
struct list_head node;
struct completion complete;
u32 error;
u32 payload[16];
};
/*
* Incoming HFI ack messages can come in out of order so we need to store all
* the pending messages on a list until they are handled.
*/
static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(message_lock);
static LIST_HEAD(hfi_ack_list);
static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
struct a6xx_hfi_msg_response *msg)
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
u32 *payload, u32 payload_size)
{
struct a6xx_hfi_response *resp;
u32 id, seqnum;
/* msg->ret_header contains the header of the message being acked */
id = HFI_HEADER_ID(msg->ret_header);
seqnum = HFI_HEADER_SEQNUM(msg->ret_header);
spin_lock(&hfi_ack_lock);
list_for_each_entry(resp, &hfi_ack_list, node) {
if (resp->id == id && resp->seqnum == seqnum) {
resp->error = msg->error;
memcpy(resp->payload, msg->payload,
sizeof(resp->payload));
complete(&resp->complete);
spin_unlock(&hfi_ack_lock);
return;
}
}
spin_unlock(&hfi_ack_lock);
dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
}
static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
struct a6xx_hfi_msg_response *msg)
{
struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;
dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
}
void a6xx_hfi_task(unsigned long data)
{
struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
struct a6xx_hfi_msg_response resp;
u32 val;
int ret;
/* Wait for a response */
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
if (ret) {
dev_err(gmu->dev,
"Message %s id %d timed out waiting for response\n",
a6xx_hfi_msg_id[id], seqnum);
return -ETIMEDOUT;
}
/* Clear the interrupt */
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
for (;;) {
u32 id;
int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
struct a6xx_hfi_msg_response resp;
/* Get the next packet */
ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
sizeof(resp) >> 2);
/* Returns the number of bytes copied or negative on error */
if (ret <= 0) {
if (ret < 0)
dev_err(gmu->dev,
"Unable to read the HFI message queue\n");
break;
/* If the queue is empty our response never made it */
if (!ret) {
dev_err(gmu->dev,
"The HFI response queue is unexpectedly empty\n");
return -ENOENT;
}
id = HFI_HEADER_ID(resp.header);
if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
struct a6xx_hfi_msg_error *error =
(struct a6xx_hfi_msg_error *) &resp;
if (id == HFI_F2H_MSG_ACK)
a6xx_hfi_handle_ack(gmu, &resp);
else if (id == HFI_F2H_MSG_ERROR)
a6xx_hfi_handle_error(gmu, &resp);
dev_err(gmu->dev, "GMU firmware error %d\n",
error->code);
continue;
}
if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
dev_err(gmu->dev,
"Unexpected message id %d on the response queue\n",
HFI_HEADER_SEQNUM(resp.ret_header));
continue;
}
if (resp.error) {
dev_err(gmu->dev,
"Message %s id %d returned error %d\n",
a6xx_hfi_msg_id[id], seqnum, resp.error);
return -EINVAL;
}
/* All is well, copy over the buffer */
if (payload && payload_size)
memcpy(payload, resp.payload,
min_t(u32, payload_size, sizeof(resp.payload)));
return 0;
}
}
@ -163,7 +152,6 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
void *data, u32 size, u32 *payload, u32 payload_size)
{
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
struct a6xx_hfi_response resp = { 0 };
int ret, dwords = size >> 2;
u32 seqnum;
@ -173,53 +161,14 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
(dwords << 8) | id;
init_completion(&resp.complete);
resp.id = id;
resp.seqnum = seqnum;
spin_lock_bh(&hfi_ack_lock);
list_add_tail(&resp.node, &hfi_ack_list);
spin_unlock_bh(&hfi_ack_lock);
ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
if (ret) {
dev_err(gmu->dev, "Unable to send message %s id %d\n",
a6xx_hfi_msg_id[id], seqnum);
goto out;
}
/* Wait up to 5 seconds for the response */
ret = wait_for_completion_timeout(&resp.complete,
msecs_to_jiffies(5000));
if (!ret) {
dev_err(gmu->dev,
"Message %s id %d timed out waiting for response\n",
a6xx_hfi_msg_id[id], seqnum);
ret = -ETIMEDOUT;
} else
ret = 0;
out:
spin_lock_bh(&hfi_ack_lock);
list_del(&resp.node);
spin_unlock_bh(&hfi_ack_lock);
if (ret)
return ret;
if (resp.error) {
dev_err(gmu->dev, "Message %s id %d returned error %d\n",
a6xx_hfi_msg_id[id], seqnum, resp.error);
return -EINVAL;
}
if (payload && payload_size) {
int copy = min_t(u32, payload_size, sizeof(resp.payload));
memcpy(payload, resp.payload, copy);
}
return 0;
return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}
static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
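
The a6xx_hfi.c rewrite above drops the tasklet and the shared ack list: since the driver only ever waits for one ack at a time, a6xx_hfi_send_msg() can poll the interrupt status itself and then drain the response queue until the matching sequence number turns up. A toy model of that drain loop (queue and poll are simulated; this is not the kernel code):

#include <stdint.h>
#include <stdio.h>

struct resp { uint32_t seqnum; uint32_t error; };

/* simulated response queue: an unrelated ack, then ours */
static struct resp queue[] = { { 1, 0 }, { 2, 0 } };
static unsigned int head, tail = 2;

static int queue_read(struct resp *r)
{
    if (head == tail)
        return 0;        /* empty: our response never arrived */
    *r = queue[head++];
    return 1;
}

static int wait_for_ack(uint32_t seqnum)
{
    struct resp r;

    for (;;) {
        if (!queue_read(&r))
            return -1;               /* -ENOENT in the driver */
        if (r.seqnum != seqnum)
            continue;                /* not ours, keep draining */
        return r.error ? -2 : 0;     /* -EINVAL on a GMU error */
    }
}

int main(void)
{
    printf("ack for seq 2 -> %d\n", wait_for_ack(2));
    return 0;
}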


@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:


@ -120,6 +120,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
},
};


@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@ -237,7 +237,7 @@ enum adreno_pm4_type3_packets {
CP_UNK_A6XX_14 = 20,
CP_UNK_A6XX_36 = 54,
CP_UNK_A6XX_55 = 85,
UNK_A6XX_6D = 109,
CP_REG_WRITE = 109,
};
enum adreno_state_block {
@ -968,19 +968,19 @@ static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
}
#define REG_CP_SET_BIN_DATA5_5 0x00000005
#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK 0xffffffff
#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT 0
static inline uint32_t CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO(uint32_t val)
#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK 0xffffffff
#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT 0
static inline uint32_t CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO(uint32_t val)
{
return ((val) << CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK;
return ((val) << CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK;
}
#define REG_CP_SET_BIN_DATA5_6 0x00000006
#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK 0xffffffff
#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT 0
static inline uint32_t CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI(uint32_t val)
#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK 0xffffffff
#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT 0
static inline uint32_t CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO(uint32_t val)
{
return ((val) << CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK;
return ((val) << CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK;
}
#define REG_CP_REG_TO_MEM_0 0x00000000

File diff suppressed because it is too large.


@ -83,14 +83,14 @@ struct dpu_crtc_smmu_state_data {
/**
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @hw_ctl: CTL Path HW driver context
* @lm_ctl: CTL Path HW driver context
* @encoder: Encoder attached to this lm & ctl
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_ctl *lm_ctl;
struct drm_encoder *encoder;
u32 mixer_op_mode;
u32 flush_mask;
@ -121,11 +121,6 @@ struct dpu_crtc_frame_event {
* struct dpu_crtc - virtualized CRTC data structure
* @base : Base drm crtc structure
* @name : ASCII description of this crtc
* @num_ctls : Number of ctl paths in use
* @num_mixers : Number of mixers in use
* @mixers_swapped: Whether the mixers have been swapped for left/right update
* especially in the case of DSC Merge.
* @mixers : List of active mixers
* @event : Pointer to last received drm vblank event. If there is a
* pending vblank event, this will be non-null.
* @vsync_count : Running count of received vsync events
@ -156,27 +151,14 @@ struct dpu_crtc_frame_event {
* @event_thread : Pointer to event handler thread
* @event_worker : Event worker queue
* @event_lock : Spinlock around event handling code
* @misr_enable : boolean entry indicates misr enable/disable status.
* @misr_frame_count : misr frame count provided by client
* @misr_data : store misr data before turning off the clocks.
* @phandle: Pointer to power handler
* @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
* @rp_lock : serialization lock for resource pool
* @rp_head : list of active resource pool
* @scl3_cfg_lut : qseed3 lut config
*/
struct dpu_crtc {
struct drm_crtc base;
char name[DPU_CRTC_NAME_SIZE];
/* HW Resources reserved for the crtc */
u32 num_ctls;
u32 num_mixers;
bool mixers_swapped;
struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
struct drm_pending_vblank_event *event;
u32 vsync_count;
@ -206,77 +188,20 @@ struct dpu_crtc {
/* for handling internal event thread */
spinlock_t event_lock;
bool misr_enable;
u32 misr_frame_count;
u32 misr_data[CRTC_DUAL_MIXERS];
struct dpu_power_handle *phandle;
struct dpu_power_event *power_event;
struct dpu_core_perf_params cur_perf;
struct mutex rp_lock;
struct list_head rp_head;
struct dpu_crtc_smmu_state_data smmu_state;
};
#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
/**
* struct dpu_crtc_res_ops - common operations for crtc resources
* @get: get given resource
* @put: put given resource
*/
struct dpu_crtc_res_ops {
void *(*get)(void *val, u32 type, u64 tag);
void (*put)(void *val);
};
#define DPU_CRTC_RES_FLAG_FREE BIT(0)
/**
* struct dpu_crtc_res - definition of crtc resources
* @list: list of crtc resource
* @type: crtc resource type
* @tag: unique identifier per type
* @refcount: reference/usage count
* @ops: callback operations
* @val: resource handle associated with type/tag
* @flags: customization flags
*/
struct dpu_crtc_res {
struct list_head list;
u32 type;
u64 tag;
atomic_t refcount;
struct dpu_crtc_res_ops ops;
void *val;
u32 flags;
};
/**
* dpu_crtc_respool - crtc resource pool
* @rp_lock: pointer to serialization lock
* @rp_head: pointer to head of active resource pools of this crtc
* @rp_list: list of crtc resource pool
* @sequence_id: sequence identifier, incremented per state duplication
* @res_list: list of resource managed by this resource pool
* @ops: resource operations for parent resource pool
*/
struct dpu_crtc_respool {
struct mutex *rp_lock;
struct list_head *rp_head;
struct list_head rp_list;
u32 sequence_id;
struct list_head res_list;
struct dpu_crtc_res_ops ops;
};
/**
* struct dpu_crtc_state - dpu container for atomic crtc state
* @base: Base drm crtc state structure
* @is_ppsplit : Whether current topology requires PPSplit special handling
* @bw_control : true if bw/clk controlled by core bw/clk properties
* @bw_split_vote : true if bw controlled by llcc/dram bw properties
* @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
@ -285,41 +210,41 @@ struct dpu_crtc_respool {
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @new_perf: new performance state being requested
* @num_mixers : Number of mixers in use
* @mixers : List of active mixers
* @num_ctls : Number of ctl paths in use
* @hw_ctls : List of active ctl paths
*/
struct dpu_crtc_state {
struct drm_crtc_state base;
bool bw_control;
bool bw_split_vote;
bool is_ppsplit;
struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
uint64_t input_fence_timeout_ns;
struct dpu_core_perf_params new_perf;
struct dpu_crtc_respool rp;
/* HW Resources reserved for the crtc */
u32 num_mixers;
struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
u32 num_ctls;
struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
};
#define to_dpu_crtc_state(x) \
container_of(x, struct dpu_crtc_state, base)
/**
* dpu_crtc_get_mixer_width - get the mixer width
* Mixer width will be same as panel width(/2 for split)
* dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers?
* @cstate: Pointer to dpu crtc state
* @Return: true - has two mixers, false - has one mixer
*/
static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate)
{
u32 mixer_width;
if (!dpu_crtc || !cstate || !mode)
return 0;
mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
return mixer_width;
return cstate->num_mixers == CRTC_DUAL_MIXERS;
}
/**
@ -375,9 +300,11 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc,
* dpu_crtc_init - create a new crtc object
* @dev: dpu device
* @plane: base plane
* @cursor: cursor plane
* @Return: new crtc object or error
*/
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct drm_plane *cursor);
/**
* dpu_crtc_register_custom_event - api for enabling/disabling crtc event
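
The dpu_crtc.h diff above moves the reserved mixers and ctl paths (and drops the MISR and resource-pool machinery) from struct dpu_crtc into struct dpu_crtc_state, where atomic state belongs. It also replaces the dpu_crtc_get_mixer_width() helper with dpu_crtc_state_is_stereo(); callers now derive the mixer width themselves, as in this small sketch of the same split:

#include <stdio.h>

/* a stereo (dual-mixer) CRTC splits the mode horizontally */
static unsigned int mixer_width(unsigned int hdisplay, int is_stereo)
{
    return is_stereo ? hdisplay / 2 : hdisplay;
}

int main(void)
{
    printf("%u\n", mixer_width(2160, 1));  /* 1080 per mixer */
    return 0;
}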


@ -65,8 +65,6 @@
#define MAX_CHANNELS_PER_ENC 2
#define MISR_BUFF_SIZE 256
#define IDLE_SHORT_TIMEOUT 1
#define MAX_VDISPLAY_SPLIT 1080
@ -161,8 +159,6 @@ enum dpu_enc_rc_states {
* @frame_done_timer: watchdog timer for frame done event
* @vsync_event_timer: vsync timer
* @disp_info: local copy of msm_display_info struct
* @misr_enable: misr enable/disable status
* @misr_frame_count: misr frame count before start capturing the data
* @idle_pc_supported: indicate if idle power collaps is supported
* @rc_lock: resource control mutex lock to protect
* virt encoder over various state changes
@ -179,11 +175,10 @@ struct dpu_encoder_virt {
spinlock_t enc_spinlock;
uint32_t bus_scaling_client;
uint32_t display_num_of_h_tiles;
unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
struct dpu_encoder_phys *cur_master;
struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
bool intfs_swapped;
@ -202,8 +197,6 @@ struct dpu_encoder_virt {
struct timer_list vsync_event_timer;
struct msm_display_info disp_info;
bool misr_enable;
u32 misr_frame_count;
bool idle_pc_supported;
struct mutex rc_lock;
@ -443,30 +436,22 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
}
void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
struct dpu_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state)
struct dpu_encoder_hw_resources *hw_res)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int i = 0;
if (!hw_res || !drm_enc || !conn_state) {
DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
drm_enc != 0, hw_res != 0, conn_state != 0);
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
/* Query resources used by phys encs, expected to be without overlap */
memset(hw_res, 0, sizeof(*hw_res));
hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys && phys->ops.get_hw_resources)
phys->ops.get_hw_resources(phys, hw_res, conn_state);
phys->ops.get_hw_resources(phys, hw_res);
}
}
@ -525,7 +510,7 @@ void dpu_encoder_helper_split_config(
hw_mdptop = phys_enc->hw_mdptop;
disp_info = &dpu_enc->disp_info;
if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
return;
/**
@ -660,7 +645,7 @@ static int dpu_encoder_virt_atomic_check(
if (drm_atomic_crtc_needs_modeset(crtc_state)
&& dpu_enc->mode_set_complete) {
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
conn_state, topology, true);
topology, true);
dpu_enc->mode_set_complete = false;
}
}
@ -1016,9 +1001,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_kms *dpu_kms;
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
struct dpu_rm_hw_iter pp_iter;
struct dpu_rm_hw_iter pp_iter, ctl_iter;
struct msm_display_topology topology;
enum dpu_rm_topology_name topology_name;
struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
int i = 0, ret;
if (!drm_enc) {
@ -1051,7 +1036,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
conn->state, topology, false);
topology, false);
if (ret) {
DPU_ERROR_ENC(dpu_enc,
"failed to reserve hw resources, %d\n", ret);
@ -1066,19 +1051,33 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
}
topology_name = dpu_rm_get_topology_name(topology);
dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter))
break;
hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw;
}
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys) {
if (!dpu_enc->hw_pp[i]) {
DPU_ERROR_ENC(dpu_enc,
"invalid pingpong block for the encoder\n");
DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
"at idx: %d\n", i);
return;
}
if (!hw_ctl[i]) {
DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
"at idx: %d\n", i);
return;
}
phys->hw_pp = dpu_enc->hw_pp[i];
phys->hw_ctl = hw_ctl[i];
phys->connector = conn->state->connector;
phys->topology_name = topology_name;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
}
@ -1111,12 +1110,6 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
return;
}
if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
dpu_enc->cur_master->hw_mdptop &&
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
dpu_enc->cur_master->hw_mdptop);
if (dpu_enc->cur_master->hw_mdptop &&
dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
@ -1153,7 +1146,7 @@ void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int i, ret = 0;
int ret = 0;
struct drm_display_mode *cur_mode = NULL;
if (!drm_enc) {
@ -1166,21 +1159,12 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
cur_mode->vdisplay);
dpu_enc->cur_master = NULL;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
/* always enable slave encoder before master */
if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
dpu_enc->cur_master = phys;
break;
}
}
if (!dpu_enc->cur_master) {
DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
return;
}
if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
if (ret) {
@ -1189,26 +1173,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
return;
}
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!phys)
continue;
if (phys != dpu_enc->cur_master) {
if (phys->ops.enable)
phys->ops.enable(phys);
}
if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
phys->ops.setup_misr(phys, true,
dpu_enc->misr_frame_count);
}
if (dpu_enc->cur_master->ops.enable)
dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
_dpu_encoder_virt_enable_helper(drm_enc);
}
@ -1266,8 +1230,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
dpu_enc->phys_encs[i]->connector = NULL;
}
dpu_enc->cur_master = NULL;
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
dpu_rm_release(&dpu_kms->rm, drm_enc);
@ -1397,9 +1359,9 @@ static void dpu_encoder_frame_done_callback(
/* One of the physical encoders has become idle */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] == ready_phys) {
clear_bit(i, dpu_enc->frame_busy_mask);
trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
dpu_enc->frame_busy_mask[0]);
clear_bit(i, dpu_enc->frame_busy_mask);
}
}
@ -1480,7 +1442,8 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
ret = ctl->ops.get_pending_flush(ctl);
trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
pending_kickoff_cnt, ctl->idx, ret);
pending_kickoff_cnt, ctl->idx,
extra_flush_bits, ret);
}
/**
@ -1879,7 +1842,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
phys->ops.handle_post_kickoff(phys);
}
if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
!_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
trace_dpu_enc_early_kickoff(DRMID(drm_enc),
ktime_to_ms(wakeup_time));
@ -1955,113 +1918,6 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
return single_open(file, _dpu_encoder_status_show, inode->i_private);
}
static ssize_t _dpu_encoder_misr_setup(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_encoder_virt *dpu_enc;
int i = 0, rc;
char buf[MISR_BUFF_SIZE + 1];
size_t buff_copy;
u32 frame_count, enable;
if (!file || !file->private_data)
return -EINVAL;
dpu_enc = file->private_data;
buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
if (copy_from_user(buf, user_buf, buff_copy))
return -EINVAL;
buf[buff_copy] = 0; /* end of string */
if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
return -EINVAL;
rc = _dpu_encoder_power_enable(dpu_enc, true);
if (rc)
return rc;
mutex_lock(&dpu_enc->enc_lock);
dpu_enc->misr_enable = enable;
dpu_enc->misr_frame_count = frame_count;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!phys || !phys->ops.setup_misr)
continue;
phys->ops.setup_misr(phys, enable, frame_count);
}
mutex_unlock(&dpu_enc->enc_lock);
_dpu_encoder_power_enable(dpu_enc, false);
return count;
}
static ssize_t _dpu_encoder_misr_read(struct file *file,
char __user *user_buff, size_t count, loff_t *ppos)
{
struct dpu_encoder_virt *dpu_enc;
int i = 0, len = 0;
char buf[MISR_BUFF_SIZE + 1] = {'\0'};
int rc;
if (*ppos)
return 0;
if (!file || !file->private_data)
return -EINVAL;
dpu_enc = file->private_data;
rc = _dpu_encoder_power_enable(dpu_enc, true);
if (rc)
return rc;
mutex_lock(&dpu_enc->enc_lock);
if (!dpu_enc->misr_enable) {
len += snprintf(buf + len, MISR_BUFF_SIZE - len,
"disabled\n");
goto buff_check;
} else if (dpu_enc->disp_info.capabilities &
~MSM_DISPLAY_CAP_VID_MODE) {
len += snprintf(buf + len, MISR_BUFF_SIZE - len,
"unsupported\n");
goto buff_check;
}
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!phys || !phys->ops.collect_misr)
continue;
len += snprintf(buf + len, MISR_BUFF_SIZE - len,
"Intf idx:%d\n", phys->intf_idx - INTF_0);
len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
phys->ops.collect_misr(phys));
}
buff_check:
if (count <= len) {
len = 0;
goto end;
}
if (copy_to_user(user_buff, buf, len)) {
len = -EFAULT;
goto end;
}
*ppos += len; /* increase offset */
end:
mutex_unlock(&dpu_enc->enc_lock);
_dpu_encoder_power_enable(dpu_enc, false);
return len;
}
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@ -2076,12 +1932,6 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
.release = single_release,
};
static const struct file_operations debugfs_misr_fops = {
.open = simple_open,
.read = _dpu_encoder_misr_read,
.write = _dpu_encoder_misr_setup,
};
char name[DPU_NAME_SIZE];
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
@ -2105,9 +1955,6 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
debugfs_create_file("status", 0600,
dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
debugfs_create_file("misr_data", 0600,
dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
for (i = 0; i < dpu_enc->num_phys_encs; i++)
if (dpu_enc->phys_encs[i] &&
dpu_enc->phys_encs[i]->ops.late_register)
@ -2195,6 +2042,11 @@ static int dpu_encoder_virt_add_phys_encs(
++dpu_enc->num_phys_encs;
}
if (params->split_role == ENC_ROLE_SLAVE)
dpu_enc->cur_slave = enc;
else
dpu_enc->cur_master = enc;
return 0;
}
@ -2206,8 +2058,7 @@ static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct msm_display_info *disp_info,
int *drm_enc_mode)
struct msm_display_info *disp_info)
{
int ret = 0;
int i = 0;
@ -2220,6 +2071,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
return -EINVAL;
}
dpu_enc->cur_master = NULL;
memset(&phys_params, 0, sizeof(phys_params));
phys_params.dpu_kms = dpu_kms;
phys_params.parent = &dpu_enc->base;
@ -2228,24 +2081,17 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
DPU_DEBUG("\n");
if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
*drm_enc_mode = DRM_MODE_ENCODER_DSI;
switch (disp_info->intf_type) {
case DRM_MODE_ENCODER_DSI:
intf_type = INTF_DSI;
} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
intf_type = INTF_HDMI;
} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
intf_type = INTF_DP;
} else {
break;
default:
DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
return -EINVAL;
}
WARN_ON(disp_info->num_of_h_tiles < 1);
dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
@ -2358,25 +2204,22 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *drm_enc = NULL;
struct dpu_encoder_virt *dpu_enc = NULL;
int drm_enc_mode = DRM_MODE_ENCODER_NONE;
int ret = 0;
dpu_enc = to_dpu_encoder_virt(enc);
mutex_init(&dpu_enc->enc_lock);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
&drm_enc_mode);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
if (ret)
goto fail;
dpu_enc->cur_master = NULL;
spin_lock_init(&dpu_enc->enc_spinlock);
atomic_set(&dpu_enc->frame_done_timeout, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
timer_setup(&dpu_enc->vsync_event_timer,
dpu_encoder_vsync_event_handler,
0);
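
Worth calling out in the dpu_encoder.c diff: master/slave roles are now fixed when the physical encoders are created (in dpu_encoder_virt_add_phys_encs) instead of being rediscovered by scanning on every enable, and the enable order is slave before master. A stripped-down sketch of the assignment, with stand-in types:

#include <stdio.h>

enum split_role { ENC_ROLE_MASTER, ENC_ROLE_SLAVE };

struct phys_enc { const char *name; };
struct virt_enc { struct phys_enc *cur_master, *cur_slave; };

static void add_phys_enc(struct virt_enc *v, struct phys_enc *p,
                         enum split_role role)
{
    if (role == ENC_ROLE_SLAVE)
        v->cur_slave = p;     /* enabled first on kickoff */
    else
        v->cur_master = p;    /* enabled last */
}

int main(void)
{
    struct phys_enc m = { "intf0" }, s = { "intf1" };
    struct virt_enc v = { 0 };

    add_phys_enc(&v, &m, ENC_ROLE_MASTER);
    add_phys_enc(&v, &s, ENC_ROLE_SLAVE);
    printf("master=%s slave=%s\n", v.cur_master->name, v.cur_slave->name);
    return 0;
}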


@ -32,15 +32,9 @@
/**
* Encoder functions and data types
* @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
* @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
* @display_num_of_h_tiles: Number of horizontal tiles in case of split
* interface
* @topology: Topology of the display
*/
struct dpu_encoder_hw_resources {
enum dpu_intf_mode intfs[INTF_MAX];
bool needs_cdm;
u32 display_num_of_h_tiles;
};
/**
@ -56,11 +50,9 @@ struct dpu_encoder_kickoff_params {
* dpu_encoder_get_hw_resources - Populate table of required hardware resources
* @encoder: encoder pointer
* @hw_res: resource table to populate with encoder required resources
* @conn_state: report hw reqs based on this proposed connector state
*/
void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
struct dpu_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state);
struct dpu_encoder_hw_resources *hw_res);
/**
* dpu_encoder_register_vblank_callback - provide callback to encoder that


@ -22,8 +22,8 @@
#include "dpu_hw_pingpong.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
#include "dpu_hw_cdm.h"
#include "dpu_encoder.h"
#include "dpu_crtc.h"
#define DPU_ENCODER_NAME_MAX 16
@ -114,8 +114,6 @@ struct dpu_encoder_virt_ops {
* @handle_post_kickoff: Do any work necessary post-kickoff
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
* @setup_misr: Sets up MISR, enable and disables based on sysfs
* @collect_misr: Collects MISR data on frame update
* @hw_reset: Issue HW recovery such as CTL reset and clear
* DPU_ENC_ERR_NEEDS_HW_RESET state
* @irq_control: Handler to enable/disable all the encoder IRQs
@ -143,8 +141,7 @@ struct dpu_encoder_phys_ops {
struct drm_connector_state *conn_state);
void (*destroy)(struct dpu_encoder_phys *encoder);
void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
struct dpu_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state);
struct dpu_encoder_hw_resources *hw_res);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
@ -154,10 +151,6 @@ struct dpu_encoder_phys_ops {
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
bool enable, u32 frame_count);
u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
@ -210,8 +203,6 @@ struct dpu_encoder_irq {
* @parent_ops: Callbacks exposed by the parent to the phys_enc
* @hw_mdptop: Hardware interface to the top registers
* @hw_ctl: Hardware interface to the ctl registers
* @hw_cdm: Hardware interface to the cdm registers
* @cdm_cfg: Chroma-down hardware configuration
* @hw_pp: Hardware interface to the ping pong registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
@ -219,7 +210,6 @@ struct dpu_encoder_irq {
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
* @intf_idx: Interface index on dpu hardware
* @topology_name: topology selected for the display
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
@ -241,15 +231,12 @@ struct dpu_encoder_phys {
const struct dpu_encoder_virt_ops *parent_ops;
struct dpu_hw_mdp *hw_mdptop;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_cdm *hw_cdm;
struct dpu_hw_cdm_cfg cdm_cfg;
struct dpu_hw_pingpong *hw_pp;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
enum dpu_intf_mode intf_mode;
enum dpu_intf intf_idx;
enum dpu_rm_topology_name topology_name;
spinlock_t *enc_spinlock;
enum dpu_enc_enable_state enable_state;
atomic_t vblank_refcount;
@ -367,11 +354,15 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_crtc_state *dpu_cstate;
if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
return BLEND_3D_NONE;
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
if (phys_enc->split_role == ENC_ROLE_SOLO &&
phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
dpu_crtc_state_is_stereo(dpu_cstate))
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
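With topology_name gone from the phys encoder, the 3D-merge decision above is recomputed from live CRTC state each time: a solo encoder whose CRTC is stereo (presumably two mixers merged onto a single interface, per the new dpu_crtc_state_is_stereo() query) needs horizontal row interleave, everything else gets BLEND_3D_NONE. The decision in isolation, as a compilable sketch:

#include <stdbool.h>
#include <stdio.h>

enum split_role { ENC_ROLE_SOLO, ENC_ROLE_MASTER, ENC_ROLE_SLAVE };
enum blend_3d { BLEND_3D_NONE, BLEND_3D_H_ROW_INT };

/* Solo encoder + stereo CRTC must row-interleave the two mixers;
 * split master/slave pairs each drive their own interface and do not. */
static enum blend_3d get_3d_blend_mode(enum split_role role, bool stereo)
{
	if (role == ENC_ROLE_SOLO && stereo)
		return BLEND_3D_H_ROW_INT;
	return BLEND_3D_NONE;
}

int main(void)
{
	printf("solo/stereo=%d slave/stereo=%d\n",
	       get_3d_blend_mode(ENC_ROLE_SOLO, true),
	       get_3d_blend_mode(ENC_ROLE_SLAVE, true));
	return 0;
}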


@ -196,9 +196,6 @@ static void dpu_encoder_phys_cmd_mode_set(
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
struct dpu_rm_hw_iter iter;
int i, instance;
if (!phys_enc || !mode || !adj_mode) {
DPU_ERROR("invalid args\n");
@ -208,22 +205,6 @@ static void dpu_encoder_phys_cmd_mode_set(
DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
drm_mode_debug_printmodeline(adj_mode);
instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
/* Retrieve previously allocated HW Resources. Shouldn't fail */
dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
for (i = 0; i <= instance; i++) {
if (dpu_rm_get_hw(rm, &iter))
phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
}
if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
PTR_ERR(phys_enc->hw_ctl));
phys_enc->hw_ctl = NULL;
return;
}
_dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
}
@ -618,23 +599,8 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_phys_cmd_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state)
struct dpu_encoder_hw_resources *hw_res)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "\n");
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
}
@ -823,7 +789,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_cmd *cmd_enc = NULL;
struct dpu_hw_mdp *hw_mdp;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@ -836,14 +801,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
goto fail;
}
phys_enc = &cmd_enc->base;
hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
if (IS_ERR_OR_NULL(hw_mdp)) {
ret = PTR_ERR(hw_mdp);
DPU_ERROR("failed to get mdptop\n");
goto fail_mdp_init;
}
phys_enc->hw_mdptop = hw_mdp;
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
@ -898,8 +856,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
return phys_enc;
fail_mdp_init:
kfree(cmd_enc);
fail:
return ERR_PTR(ret);
}


@ -355,13 +355,14 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
{
struct dpu_crtc_state *dpu_cstate;
if (!phys_enc)
return false;
if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
return true;
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
return false;
return dpu_cstate->num_ctls > 1;
}
static bool dpu_encoder_phys_vid_needs_single_flush(
@ -395,9 +396,6 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
struct dpu_rm *rm;
struct dpu_rm_hw_iter iter;
int i, instance;
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc || !phys_enc->dpu_kms) {
@ -405,7 +403,6 @@ static void dpu_encoder_phys_vid_mode_set(
return;
}
rm = &phys_enc->dpu_kms->rm;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (adj_mode) {
@ -414,21 +411,6 @@ static void dpu_encoder_phys_vid_mode_set(
DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
}
instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
/* Retrieve previously allocated HW Resources. Shouldn't fail */
dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
for (i = 0; i <= instance; i++) {
if (dpu_rm_get_hw(rm, &iter))
phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
}
if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
PTR_ERR(phys_enc->hw_ctl));
phys_enc->hw_ctl = NULL;
return;
}
_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
}
@ -481,7 +463,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct msm_drm_private *priv;
struct dpu_encoder_phys_vid *vid_enc;
struct dpu_hw_intf *intf;
struct dpu_rm_hw_iter iter;
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
@ -493,11 +475,20 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
priv = phys_enc->parent->dev->dev_private;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
intf = vid_enc->hw_intf;
ctl = phys_enc->hw_ctl;
if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_INTF);
while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &iter)) {
struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
if (hw_intf->idx == phys_enc->intf_idx) {
vid_enc->hw_intf = hw_intf;
break;
}
}
if (!vid_enc->hw_intf) {
DPU_ERROR("hw_intf not assigned\n");
return;
}
@ -519,7 +510,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
!dpu_encoder_phys_vid_is_master(phys_enc))
goto skip_flush;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
ctl->ops.get_bitmask_intf(ctl, &flush_mask, vid_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
skip_flush:
@ -547,25 +538,9 @@ static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_phys_vid_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state)
struct dpu_encoder_hw_resources *hw_res)
{
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc || !hw_res) {
DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
phys_enc != 0, hw_res != 0, conn_state != 0);
return;
}
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (!vid_enc->hw_intf) {
DPU_ERROR("invalid arg(s), hw_intf\n");
return;
}
DPU_DEBUG_VIDENC(vid_enc, "\n");
hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_VIDEO;
}
static int _dpu_encoder_phys_vid_wait_for_vblank(
@ -756,32 +731,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
}
}
static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
bool enable, u32 frame_count)
{
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc)
return;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
enable, frame_count);
}
static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc)
return 0;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
}
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
@ -817,8 +766,6 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
}
@ -828,8 +775,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_vid *vid_enc = NULL;
struct dpu_rm_hw_iter iter;
struct dpu_hw_mdp *hw_mdp;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@ -846,35 +791,9 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
phys_enc = &vid_enc->base;
hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
if (IS_ERR_OR_NULL(hw_mdp)) {
ret = PTR_ERR(hw_mdp);
DPU_ERROR("failed to get mdptop\n");
goto fail;
}
phys_enc->hw_mdptop = hw_mdp;
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
/**
* hw_intf resource permanently assigned to this encoder
* Other resources allocated at atomic commit time by use case
*/
dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
if (hw_intf->idx == p->intf_idx) {
vid_enc->hw_intf = hw_intf;
break;
}
}
if (!vid_enc->hw_intf) {
ret = -EINVAL;
DPU_ERROR("failed to get hw_intf\n");
goto fail;
}
DPU_DEBUG_VIDENC(vid_enc, "\n");
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
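The INTF block is no longer pinned at init time; dpu_encoder_phys_vid_enable() walks the resource manager with dpu_rm_init_hw_iter()/dpu_rm_get_hw() and claims the entry whose idx matches the encoder's. Underneath, that is a plain linear search over typed blocks; a toy standalone version of the lookup:

#include <stddef.h>
#include <stdio.h>

struct hw_intf { int idx; const char *name; };

/* Toy stand-in for dpu_rm_init_hw_iter()/dpu_rm_get_hw(): iterate the
 * blocks of one type and keep the one whose index matches. */
static struct hw_intf *find_intf(struct hw_intf *tbl, size_t n, int want_idx)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].idx == want_idx)
			return &tbl[i];
	return NULL;	/* caller logs "hw_intf not assigned" and returns */
}

int main(void)
{
	struct hw_intf tbl[] = { { 1, "intf_1" }, { 2, "intf_2" } };
	struct hw_intf *hit = find_intf(tbl, sizeof(tbl) / sizeof(tbl[0]), 2);

	printf("%s\n", hit ? hit->name : "not assigned");
	return 0;
}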


@ -29,6 +29,9 @@
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define DMA_CURSOR_SDM845_MASK \
(DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
@ -71,7 +74,6 @@ static struct dpu_mdp_cfg sdm845_mdp[] = {
.base = 0x0, .len = 0x45C,
.features = 0,
.highest_bank_bit = 0x2,
.has_dest_scaler = true,
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
.reg_off = 0x2AC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_VIG1] = {
@ -174,45 +176,35 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
#define SSPP_BLK(_name, _id, _base, _features, \
_sblk, _xinid, _type, _clkctrl) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x1c8, \
.features = VIG_SDM845_MASK, \
.features = _features, \
.sblk = &_sblk, \
.xin_id = _xinid, \
.type = SSPP_TYPE_VIG, \
.clk_ctrl = _clkctrl \
}
#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x1c8, \
.features = DMA_SDM845_MASK, \
.sblk = &_sblk, \
.xin_id = _xinid, \
.type = SSPP_TYPE_DMA, \
.type = _type, \
.clk_ctrl = _clkctrl \
}
static struct dpu_sspp_cfg sdm845_sspp[] = {
SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK,
sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK,
sdm845_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SDM845_MASK,
sdm845_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SDM845_MASK,
sdm845_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
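SSPP_VIG_BLK and SSPP_DMA_BLK differed only in .features and .type, so they collapse into a single SSPP_BLK macro that takes both, which is also what lets the two cursor-capable DMA pipes pick up DMA_CURSOR_SDM845_MASK without a third macro variant. The shape of the consolidation, reduced to a compilable toy with made-up feature masks:

#include <stdio.h>

enum sspp_type { TYPE_VIG, TYPE_DMA };

struct sspp_cfg { const char *name; unsigned long features; enum sspp_type type; };

#define VIG_MASK        0x1UL		/* stand-in feature masks */
#define DMA_MASK        0x2UL
#define DMA_CURSOR_MASK (DMA_MASK | 0x4UL)

/* One macro, features and type as parameters instead of two near-twins. */
#define SSPP_BLK(_name, _features, _type) \
	{ .name = _name, .features = _features, .type = _type }

static struct sspp_cfg sspp[] = {
	SSPP_BLK("sspp_0",  VIG_MASK,        TYPE_VIG),
	SSPP_BLK("sspp_10", DMA_CURSOR_MASK, TYPE_DMA),
};

int main(void)
{
	unsigned i;

	for (i = 0; i < sizeof(sspp) / sizeof(sspp[0]); i++)
		printf("%s: type %d features %#lx\n",
		       sspp[i].name, sspp[i].type, sspp[i].features);
	return 0;
}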
/*************************************************************
@ -227,48 +219,23 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
#define LM_BLK(_name, _id, _base, _pp, _lmpair) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
.features = MIXER_SDM845_MASK, \
.sblk = &sdm845_lm_sblk, \
.ds = _ds, \
.pingpong = _pp, \
.lm_pair_mask = (1 << _lmpair) \
}
static struct dpu_lm_cfg sdm845_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
};
/*************************************************************
* DS sub blocks config
*************************************************************/
static const struct dpu_ds_top_cfg sdm845_ds_top = {
.name = "ds_top_0", .id = DS_TOP,
.base = 0x60000, .len = 0xc,
.maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
.maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxupscale = MAX_UPSCALE_RATIO,
};
#define DS_BLK(_name, _id, _base) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x800, \
.features = DPU_SSPP_SCALER_QSEED3, \
.top = &sdm845_ds_top \
}
static struct dpu_ds_cfg sdm845_ds[] = {
DS_BLK("ds_0", DS_0, 0x800),
DS_BLK("ds_1", DS_1, 0x1000),
LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1),
LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0),
LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5),
LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0),
LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0),
LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2),
};
/*************************************************************
@ -327,18 +294,6 @@ static struct dpu_intf_cfg sdm845_intf[] = {
INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
};
/*************************************************************
* CDM sub blocks config
*************************************************************/
static struct dpu_cdm_cfg sdm845_cdm[] = {
{
.name = "cdm_0", .id = CDM_0,
.base = 0x79200, .len = 0x224,
.features = 0,
.intf_connect = BIT(INTF_3),
},
};
/*************************************************************
* VBIF sub blocks config
*************************************************************/
@ -461,12 +416,8 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.sspp = sdm845_sspp,
.mixer_count = ARRAY_SIZE(sdm845_lm),
.mixer = sdm845_lm,
.ds_count = ARRAY_SIZE(sdm845_ds),
.ds = sdm845_ds,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
.cdm_count = ARRAY_SIZE(sdm845_cdm),
.cdm = sdm845_cdm,
.intf_count = ARRAY_SIZE(sdm845_intf),
.intf = sdm845_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),


@ -428,7 +428,6 @@ struct dpu_clk_ctrl_reg {
* @highest_bank_bit: UBWC parameter
* @ubwc_static: ubwc static configuration
* @ubwc_swizzle: ubwc default swizzle setting
* @has_dest_scaler: indicates support of destination scaler
* @clk_ctrls clock control register definition
*/
struct dpu_mdp_cfg {
@ -436,7 +435,6 @@ struct dpu_mdp_cfg {
u32 highest_bank_bit;
u32 ubwc_static;
u32 ubwc_swizzle;
bool has_dest_scaler;
struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
};
@ -474,49 +472,15 @@ struct dpu_sspp_cfg {
* @features bit mask identifying sub-blocks/features
* @sblk: LM Sub-blocks information
* @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
* @ds: ID of connected DS, DS_MAX if unsupported
* @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
*/
struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
u32 ds;
unsigned long lm_pair_mask;
};
/**
* struct dpu_ds_top_cfg - information of dest scaler top
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying features
* @version hw version of dest scaler
* @maxinputwidth maximum input line width
* @maxoutputwidth maximum output line width
* @maxupscale maximum upscale ratio
*/
struct dpu_ds_top_cfg {
DPU_HW_BLK_INFO;
u32 version;
u32 maxinputwidth;
u32 maxoutputwidth;
u32 maxupscale;
};
/**
* struct dpu_ds_cfg - information of dest scaler blocks
* @id enum identifying this block
* @base register offset wrt DS top offset
* @features bit mask identifying features
* @version hw version of the qseed block
* @top DS top information
*/
struct dpu_ds_cfg {
DPU_HW_BLK_INFO;
u32 version;
const struct dpu_ds_top_cfg *top;
};
/**
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
@ -529,18 +493,6 @@ struct dpu_pingpong_cfg {
const struct dpu_pingpong_sub_blks *sblk;
};
/**
* struct dpu_cdm_cfg - information of chroma down blocks
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
* @intf_connect Bitmask of INTF IDs this CDM can connect to
*/
struct dpu_cdm_cfg {
DPU_HW_BLK_INFO;
unsigned long intf_connect;
};
/**
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
@ -728,15 +680,9 @@ struct dpu_mdss_cfg {
u32 mixer_count;
struct dpu_lm_cfg *mixer;
u32 ds_count;
struct dpu_ds_cfg *ds;
u32 pingpong_count;
struct dpu_pingpong_cfg *pingpong;
u32 cdm_count;
struct dpu_cdm_cfg *cdm;
u32 intf_count;
struct dpu_intf_cfg *intf;
@ -771,9 +717,7 @@ struct dpu_mdss_hw_cfg_handler {
#define BLK_DMA(s) ((s)->dma)
#define BLK_CURSOR(s) ((s)->cursor)
#define BLK_MIXER(s) ((s)->mixer)
#define BLK_DS(s) ((s)->ds)
#define BLK_PINGPONG(s) ((s)->pingpong)
#define BLK_CDM(s) ((s)->cdm)
#define BLK_INTF(s) ((s)->intf)
#define BLK_AD(s) ((s)->ad)


@ -1,323 +0,0 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_cdm.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define CDM_CSC_10_OPMODE 0x000
#define CDM_CSC_10_BASE 0x004
#define CDM_CDWN2_OP_MODE 0x100
#define CDM_CDWN2_CLAMP_OUT 0x104
#define CDM_CDWN2_PARAMS_3D_0 0x108
#define CDM_CDWN2_PARAMS_3D_1 0x10C
#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
#define CDM_CDWN2_COEFF_COSITE_V 0x128
#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
#define CDM_CDWN2_OUT_SIZE 0x130
#define CDM_HDMI_PACK_OP_MODE 0x200
#define CDM_CSC_10_MATRIX_COEFF_0 0x004
/**
* Horizontal coefficients for cosite chroma downscale
* s13 representation of coefficients
*/
static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
/**
* Horizontal coefficients for offsite chroma downscale
*/
static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
/**
* Vertical coefficients for cosite chroma downscale
*/
static u32 cosite_v_coeff[] = {0x00080004};
/**
* Vertical coefficients for offsite chroma downscale
*/
static u32 offsite_v_coeff[] = {0x00060002};
/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
static struct dpu_csc_cfg rgb2yuv_cfg = {
{
0x0083, 0x0102, 0x0032,
0x1fb5, 0x1f6c, 0x00e1,
0x00e1, 0x1f45, 0x1fdc
},
{ 0x00, 0x00, 0x00 },
{ 0x0040, 0x0200, 0x0200 },
{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
};
static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->cdm_count; i++) {
if (cdm == m->cdm[i].id) {
b->base_off = addr;
b->blk_off = m->cdm[i].base;
b->length = m->cdm[i].len;
b->hwversion = m->hwversion;
b->log_mask = DPU_DBG_MASK_CDM;
return &m->cdm[i];
}
}
return ERR_PTR(-EINVAL);
}
static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
struct dpu_csc_cfg *data)
{
dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
return 0;
}
static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
struct dpu_hw_cdm_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 opmode = 0;
u32 out_size = 0;
if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
opmode &= ~BIT(7);
else
opmode |= BIT(7);
/* ENABLE DWNS_H bit */
opmode |= BIT(1);
switch (cfg->h_cdwn_type) {
case CDM_CDWN_DISABLE:
/* CLEAR METHOD_H field */
opmode &= ~(0x18);
/* CLEAR DWNS_H bit */
opmode &= ~BIT(1);
break;
case CDM_CDWN_PIXEL_DROP:
/* Clear METHOD_H field (pixel drop is 0) */
opmode &= ~(0x18);
break;
case CDM_CDWN_AVG:
/* Clear METHOD_H field (Average is 0x1) */
opmode &= ~(0x18);
opmode |= (0x1 << 0x3);
break;
case CDM_CDWN_COSITE:
/* Clear METHOD_H field (Average is 0x2) */
opmode &= ~(0x18);
opmode |= (0x2 << 0x3);
/* Co-site horizontal coefficients */
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
cosite_h_coeff[0]);
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
cosite_h_coeff[1]);
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
cosite_h_coeff[2]);
break;
case CDM_CDWN_OFFSITE:
/* Clear METHOD_H field (Average is 0x3) */
opmode &= ~(0x18);
opmode |= (0x3 << 0x3);
/* Off-site horizontal coefficients */
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
offsite_h_coeff[0]);
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
offsite_h_coeff[1]);
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
offsite_h_coeff[2]);
break;
default:
pr_err("%s invalid horz down sampling type\n", __func__);
return -EINVAL;
}
/* ENABLE DWNS_V bit */
opmode |= BIT(2);
switch (cfg->v_cdwn_type) {
case CDM_CDWN_DISABLE:
/* CLEAR METHOD_V field */
opmode &= ~(0x60);
/* CLEAR DWNS_V bit */
opmode &= ~BIT(2);
break;
case CDM_CDWN_PIXEL_DROP:
/* Clear METHOD_V field (pixel drop is 0) */
opmode &= ~(0x60);
break;
case CDM_CDWN_AVG:
/* Clear METHOD_V field (Average is 0x1) */
opmode &= ~(0x60);
opmode |= (0x1 << 0x5);
break;
case CDM_CDWN_COSITE:
/* Clear METHOD_V field (Average is 0x2) */
opmode &= ~(0x60);
opmode |= (0x2 << 0x5);
/* Co-site vertical coefficients */
DPU_REG_WRITE(c,
CDM_CDWN2_COEFF_COSITE_V,
cosite_v_coeff[0]);
break;
case CDM_CDWN_OFFSITE:
/* Clear METHOD_V field (Average is 0x3) */
opmode &= ~(0x60);
opmode |= (0x3 << 0x5);
/* Off-site vertical coefficients */
DPU_REG_WRITE(c,
CDM_CDWN2_COEFF_OFFSITE_V,
offsite_v_coeff[0]);
break;
default:
return -EINVAL;
}
if (cfg->v_cdwn_type || cfg->h_cdwn_type)
opmode |= BIT(0); /* EN CDWN module */
else
opmode &= ~BIT(0);
out_size = (cfg->output_width & 0xFFFF) |
((cfg->output_height & 0xFFFF) << 16);
DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
((0x3FF << 16) | 0x0));
return 0;
}
static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
struct dpu_hw_cdm_cfg *cdm)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
const struct dpu_format *fmt = cdm->output_fmt;
struct cdm_output_cfg cdm_cfg = { 0 };
u32 opmode = 0;
u32 csc = 0;
if (!DPU_FORMAT_IS_YUV(fmt))
return -EINVAL;
if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
if (fmt->chroma_sample != DPU_CHROMA_H1V2)
return -EINVAL; /*unsupported format */
opmode = BIT(0);
opmode |= (fmt->chroma_sample << 1);
cdm_cfg.intf_en = true;
}
csc |= BIT(2);
csc &= ~BIT(1);
csc |= BIT(0);
if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
return 0;
}
static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
{
struct cdm_output_cfg cdm_cfg = { 0 };
if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
}
static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
unsigned long features)
{
ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
ops->enable = dpu_hw_cdm_enable;
ops->disable = dpu_hw_cdm_disable;
}
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
void __iomem *addr,
struct dpu_mdss_cfg *m,
struct dpu_hw_mdp *hw_mdp)
{
struct dpu_hw_cdm *c;
struct dpu_cdm_cfg *cfg;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _cdm_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
return ERR_PTR(-EINVAL);
}
c->idx = idx;
c->caps = cfg;
_setup_cdm_ops(&c->ops, c->caps->features);
c->hw_mdp = hw_mdp;
rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
/*
* Perform any default initialization for the chroma down module
* @setup default csc coefficients
*/
dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
return c;
blk_init_error:
kzfree(c);
return ERR_PTR(rc);
}
void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
{
if (cdm)
dpu_hw_blk_destroy(&cdm->base);
kfree(cdm);
}
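For reference, the deleted dpu_hw_cdm_setup_cdwn() above packs the whole chroma-downscale setup into one CDM_CDWN2_OP_MODE word: bit 0 enables the block, bits 1 and 2 enable the horizontal and vertical downsamplers, bits 3-4 and 5-6 select the method (0 pixel drop, 1 average, 2 cosite, 3 offsite), and bit 7 marks 8-bit rather than 10-bit output. A standalone packer mirroring those fields (the register itself goes away with the file):

#include <stdio.h>
#include <stdint.h>

enum cdwn { CDWN_DISABLE, CDWN_PIXEL_DROP, CDWN_AVG, CDWN_COSITE, CDWN_OFFSITE };

/* Mirror of the OP_MODE layout in the removed code: bit0 EN, bit1 DWNS_H,
 * bit2 DWNS_V, bits3-4 METHOD_H, bits5-6 METHOD_V, bit7 set for 8-bit
 * (clear for 10-bit) output. Method encodings follow the enum minus one. */
static uint32_t pack_opmode(enum cdwn h, enum cdwn v, int out_10bit)
{
	uint32_t op = 0;

	if (!out_10bit)
		op |= 1u << 7;
	if (h != CDWN_DISABLE)
		op |= (1u << 1) | ((uint32_t)(h - 1) << 3);
	if (v != CDWN_DISABLE)
		op |= (1u << 2) | ((uint32_t)(v - 1) << 5);
	if (h || v)
		op |= 1u << 0;		/* EN CDWN module */
	return op;
}

int main(void)
{
	printf("cosite/avg, 10-bit: %#x\n",
	       pack_opmode(CDWN_COSITE, CDWN_AVG, 1));	/* prints 0x37 */
	return 0;
}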


@ -1,139 +0,0 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_CDM_H
#define _DPU_HW_CDM_H
#include "dpu_hw_mdss.h"
#include "dpu_hw_top.h"
#include "dpu_hw_blk.h"
struct dpu_hw_cdm;
struct dpu_hw_cdm_cfg {
u32 output_width;
u32 output_height;
u32 output_bit_depth;
u32 h_cdwn_type;
u32 v_cdwn_type;
const struct dpu_format *output_fmt;
u32 output_type;
int flags;
};
enum dpu_hw_cdwn_type {
CDM_CDWN_DISABLE,
CDM_CDWN_PIXEL_DROP,
CDM_CDWN_AVG,
CDM_CDWN_COSITE,
CDM_CDWN_OFFSITE,
};
enum dpu_hw_cdwn_output_type {
CDM_CDWN_OUTPUT_HDMI,
CDM_CDWN_OUTPUT_WB,
};
enum dpu_hw_cdwn_output_bit_depth {
CDM_CDWN_OUTPUT_8BIT,
CDM_CDWN_OUTPUT_10BIT,
};
/**
* struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions
* Assumption is these functions will be called after
* clocks are enabled
* @setup_csc: Programs the csc matrix
* @setup_cdwn: Sets up the chroma down sub module
* @enable: Enables the output to interface and programs the
* output packer
* @disable: Puts the cdm in bypass mode
*/
struct dpu_hw_cdm_ops {
/**
* Programs the CSC matrix for conversion from RGB space to YUV space,
* it is optional to call this function as this matrix is automatically
* set during initialization, user should call this if it wants
* to program a different matrix than default matrix.
* @cdm: Pointer to the chroma down context structure
* @data Pointer to CSC configuration data
* return: 0 if success; error code otherwise
*/
int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
struct dpu_csc_cfg *data);
/**
* Programs the Chroma downsample part.
* @cdm Pointer to chroma down context
*/
int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
struct dpu_hw_cdm_cfg *cfg);
/**
* Enable the CDM module
* @cdm Pointer to chroma down context
*/
int (*enable)(struct dpu_hw_cdm *cdm,
struct dpu_hw_cdm_cfg *cfg);
/**
* Disable the CDM module
* @cdm Pointer to chroma down context
*/
void (*disable)(struct dpu_hw_cdm *cdm);
};
struct dpu_hw_cdm {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
/* chroma down */
const struct dpu_cdm_cfg *caps;
enum dpu_cdm idx;
/* mdp top hw driver */
struct dpu_hw_mdp *hw_mdp;
/* ops */
struct dpu_hw_cdm_ops ops;
};
/**
* dpu_hw_cdm - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_cdm, base);
}
/**
* dpu_hw_cdm_init - initializes the cdm hw driver object.
* should be called once before accessing every cdm.
* @idx: cdm index for which driver object is required
* @addr: mapped register io address of MDP
* @m : pointer to mdss catalog data
* @hw_mdp: pointer to mdp top hw driver object
*/
struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
void __iomem *addr,
struct dpu_mdss_cfg *m,
struct dpu_hw_mdp *hw_mdp);
/**
* dpu_hw_cdm_destroy - destroys CDM driver context
* @cdm: pointer to CDM driver context
*/
void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
#endif /*_DPU_HW_CDM_H */
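Both deleted CDM files follow the hardware-layer convention visible in the intf/lm/ctl hunks below as well: a context struct carries an ops table of function pointers, and a _setup_*_ops() helper binds implementations at init so callers never reference a concrete function. A self-contained skeleton of that pattern:

#include <stdio.h>

struct hw_blk;

struct hw_blk_ops {
	int  (*enable)(struct hw_blk *blk);
	void (*disable)(struct hw_blk *blk);
};

struct hw_blk {
	const char *name;
	struct hw_blk_ops ops;	/* filled once at init */
};

static int  blk_enable(struct hw_blk *b)  { printf("%s on\n", b->name); return 0; }
static void blk_disable(struct hw_blk *b) { printf("%s off\n", b->name); }

/* Mirrors _setup_cdm_ops()/_setup_intf_ops(): bind implementations here. */
static void setup_ops(struct hw_blk_ops *ops)
{
	ops->enable = blk_enable;
	ops->disable = blk_disable;
}

int main(void)
{
	struct hw_blk b = { .name = "blk_0" };

	setup_ops(&b.ops);
	if (b.ops.enable && !b.ops.enable(&b))
		b.ops.disable(&b);
	return 0;
}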


@ -224,19 +224,6 @@ static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
return 0;
}
static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_cdm cdm)
{
switch (cdm) {
case CDM_0:
*flushbits |= BIT(26);
break;
default:
return -EINVAL;
}
return 0;
}
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@ -310,7 +297,7 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
int i, j;
u8 stages;
int stages;
int pipes_per_stage;
stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
@ -485,7 +472,6 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
};
static struct dpu_hw_blk_ops dpu_hw_ops = {
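Note the stages variable flipping from u8 to int in dpu_hw_ctl_setup_blendstage() above: presumably _mixer_stages() can return a negative errno, and an unsigned byte silently wraps it so a later "< 0" check can never fire. The pitfall, demonstrated standalone:

#include <stdio.h>

typedef unsigned char u8;

/* Stand-in: the real _mixer_stages() returns a count or an error code. */
static int mixer_stages(int have_caps)
{
	return have_caps ? 8 : -22;	/* -EINVAL */
}

int main(void)
{
	u8 bad = mixer_stages(0);	/* wraps to 234: "< 0" never fires */
	int good = mixer_stages(0);	/* stays -22 and is caught */

	printf("u8: %d, int: %d\n", bad, good);
	return 0;
}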


@ -142,10 +142,6 @@ struct dpu_hw_ctl_ops {
u32 *flushbits,
enum dpu_intf blk);
int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
u32 *flushbits,
enum dpu_cdm blk);
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer


@ -65,9 +65,6 @@
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
#define INTF_MISR_CTRL 0x180
#define INTF_MISR_SIGNATURE 0x184
static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
struct dpu_mdss_cfg *m,
void __iomem *addr,
@ -246,30 +243,6 @@ static void dpu_hw_intf_get_status(
}
}
static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
bool enable, u32 frame_count)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 config = 0;
DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
/* clear misr data */
wmb();
if (enable)
config = (frame_count & MISR_FRAME_COUNT_MASK) |
MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
}
static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
}
static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
{
struct dpu_hw_blk_reg_map *c;
@ -289,8 +262,6 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
ops->setup_misr = dpu_hw_intf_setup_misr;
ops->collect_misr = dpu_hw_intf_collect_misr;
ops->get_line_count = dpu_hw_intf_get_line_count;
}


@ -59,8 +59,6 @@ struct intf_status {
* @ setup_prog_fetch : enables/disables the programmable fetch logic
* @ enable_timing: enable/disable timing engine
* @ get_status: returns if timing engine is enabled or not
* @ setup_misr: enables/disables MISR in HW register
* @ collect_misr: reads and stores MISR data from HW register
* @ get_line_count: reads current vertical line counter
*/
struct dpu_hw_intf_ops {
@ -77,11 +75,6 @@ struct dpu_hw_intf_ops {
void (*get_status)(struct dpu_hw_intf *intf,
struct intf_status *status);
void (*setup_misr)(struct dpu_hw_intf *intf,
bool enable, u32 frame_count);
u32 (*collect_misr)(struct dpu_hw_intf *intf);
u32 (*get_line_count)(struct dpu_hw_intf *intf);
};


@ -34,9 +34,6 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
#define LM_MISR_CTRL 0x310
#define LM_MISR_SIGNATURE 0x314
static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
struct dpu_mdss_cfg *m,
void __iomem *addr,
@ -171,30 +168,6 @@ static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
{
}
static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
bool enable, u32 frame_count)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 config = 0;
DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
/* clear misr data */
wmb();
if (enable)
config = (frame_count & MISR_FRAME_COUNT_MASK) |
MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
DPU_REG_WRITE(c, LM_MISR_CTRL, config);
}
static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
return DPU_REG_READ(c, LM_MISR_SIGNATURE);
}
static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
@ -207,8 +180,6 @@ static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
ops->setup_gc = dpu_hw_lm_gc;
ops->setup_misr = dpu_hw_lm_setup_misr;
ops->collect_misr = dpu_hw_lm_collect_misr;
};
static struct dpu_hw_blk_ops dpu_hw_ops = {


@ -66,13 +66,6 @@ struct dpu_hw_lm_ops {
*/
void (*setup_gc)(struct dpu_hw_mixer *mixer,
void *cfg);
/* setup_misr: enables/disables MISR in HW register */
void (*setup_misr)(struct dpu_hw_mixer *ctx,
bool enable, u32 frame_count);
/* collect_misr: reads and stores MISR data from HW register */
u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
};
struct dpu_hw_mixer {


@ -100,7 +100,6 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_SSPP,
DPU_HW_BLK_LM,
DPU_HW_BLK_CTL,
DPU_HW_BLK_CDM,
DPU_HW_BLK_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
@ -173,13 +172,6 @@ enum dpu_dspp {
DSPP_MAX
};
enum dpu_ds {
DS_TOP,
DS_0,
DS_1,
DS_MAX
};
enum dpu_ctl {
CTL_0 = 1,
CTL_1,
@ -189,12 +181,6 @@ enum dpu_ctl {
CTL_MAX
};
enum dpu_cdm {
CDM_0 = 1,
CDM_1,
CDM_MAX
};
enum dpu_pingpong {
PINGPONG_0 = 1,
PINGPONG_1,
@ -246,12 +232,6 @@ enum dpu_wb {
WB_MAX
};
enum dpu_ad {
AD_0 = 0x1,
AD_1,
AD_MAX
};
enum dpu_cwb {
CWB_0 = 0x1,
CWB_1,
@ -451,15 +431,14 @@ struct dpu_mdss_color {
* Define bit masks for h/w logging.
*/
#define DPU_DBG_MASK_NONE (1 << 0)
#define DPU_DBG_MASK_CDM (1 << 1)
#define DPU_DBG_MASK_INTF (1 << 2)
#define DPU_DBG_MASK_LM (1 << 3)
#define DPU_DBG_MASK_CTL (1 << 4)
#define DPU_DBG_MASK_PINGPONG (1 << 5)
#define DPU_DBG_MASK_SSPP (1 << 6)
#define DPU_DBG_MASK_WB (1 << 7)
#define DPU_DBG_MASK_TOP (1 << 8)
#define DPU_DBG_MASK_VBIF (1 << 9)
#define DPU_DBG_MASK_ROT (1 << 10)
#define DPU_DBG_MASK_INTF (1 << 1)
#define DPU_DBG_MASK_LM (1 << 2)
#define DPU_DBG_MASK_CTL (1 << 3)
#define DPU_DBG_MASK_PINGPONG (1 << 4)
#define DPU_DBG_MASK_SSPP (1 << 5)
#define DPU_DBG_MASK_WB (1 << 6)
#define DPU_DBG_MASK_TOP (1 << 7)
#define DPU_DBG_MASK_VBIF (1 << 8)
#define DPU_DBG_MASK_ROT (1 << 9)
#endif /* _DPU_HW_MDSS_H */


@ -98,23 +98,6 @@ static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
}
static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
struct cdm_output_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 out_ctl = 0;
if (!mdp || !cfg)
return;
c = &mdp->hw;
if (cfg->intf_en)
out_ctl |= BIT(19);
DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
}
static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
enum dpu_clk_ctrl_type clk_ctrl, bool enable)
{
@ -307,7 +290,6 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
unsigned long cap)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
ops->setup_cdm_output = dpu_hw_setup_cdm_output;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
ops->get_danger_status = dpu_hw_get_danger_status;
ops->setup_vsync_source = dpu_hw_setup_vsync_source;


@ -51,14 +51,6 @@ struct split_pipe_cfg {
bool split_flush_en;
};
/**
* struct cdm_output_cfg: output configuration for cdm
* @intf_en : enable/disable interface output
*/
struct cdm_output_cfg {
bool intf_en;
};
/**
* struct dpu_danger_safe_status: danger and safe status signals
* @mdp: top level status
@ -89,7 +81,6 @@ struct dpu_vsync_source_cfg {
* Assumption is these functions will be called after clocks are enabled.
* @setup_split_pipe : Programs the pipe control registers
* @setup_pp_split : Programs the pp split control registers
* @setup_cdm_output : programs cdm control
* @setup_traffic_shaper : programs traffic shaper control
*/
struct dpu_hw_mdp_ops {
@ -101,14 +92,6 @@ struct dpu_hw_mdp_ops {
void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
struct split_pipe_cfg *p);
/**
* setup_cdm_output() : Setup selection control of the cdm data path
* @mdp : mdp top context driver
* @cfg : cdm output configuration
*/
void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
struct cdm_output_cfg *cfg);
/**
* setup_traffic_shaper() : Setup traffic shaper control
* @mdp : mdp top context driver


@ -50,9 +50,6 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
#define QSEED3_CLK_CTRL0 0x54
#define QSEED3_CLK_CTRL1 0x58
#define QSEED3_CLK_STATUS 0x5C
#define QSEED3_MISR_CTRL 0x70
#define QSEED3_MISR_SIGNATURE_0 0x74
#define QSEED3_MISR_SIGNATURE_1 0x78
#define QSEED3_PHASE_INIT_Y_H 0x90
#define QSEED3_PHASE_INIT_Y_V 0x94
#define QSEED3_PHASE_INIT_UV_H 0x98


@ -148,16 +148,6 @@ struct dpu_hw_scaler3_cfg {
struct dpu_hw_scaler3_de_cfg de;
};
struct dpu_hw_scaler3_lut_cfg {
bool is_configured;
u32 *dir_lut;
size_t dir_len;
u32 *cir_lut;
size_t cir_len;
u32 *sep_lut;
size_t sep_len;
};
/**
* struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
* @num_ext_pxls_lr: Number of total horizontal pixels
@ -325,12 +315,6 @@ int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
#define MISR_FRAME_COUNT_MASK 0xFF
#define MISR_CTRL_ENABLE BIT(8)
#define MISR_CTRL_STATUS BIT(9)
#define MISR_CTRL_STATUS_CLEAR BIT(10)
#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
void *dpu_hw_util_get_dir(void);
void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,


@ -450,7 +450,7 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev,
int i, rc;
/*TODO: Support two independent DSI connectors */
encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI);
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
if (IS_ERR_OR_NULL(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return;
@ -531,12 +531,13 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
struct drm_device *dev;
struct drm_plane *primary_planes[MAX_PLANES], *plane;
struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
struct drm_crtc *crtc;
struct msm_drm_private *priv;
struct dpu_mdss_cfg *catalog;
int primary_planes_idx = 0, i, ret;
int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
int max_crtc_count;
if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
@ -556,16 +557,24 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
/* Create the planes */
/* Create the planes, keeping track of one primary/cursor per crtc */
for (i = 0; i < catalog->sspp_count; i++) {
bool primary = true;
enum drm_plane_type type;
if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
|| primary_planes_idx >= max_crtc_count)
primary = false;
if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
&& cursor_planes_idx < max_crtc_count)
type = DRM_PLANE_TYPE_CURSOR;
else if (primary_planes_idx < max_crtc_count)
type = DRM_PLANE_TYPE_PRIMARY;
else
type = DRM_PLANE_TYPE_OVERLAY;
plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
(1UL << max_crtc_count) - 1, 0);
DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
type, catalog->sspp[i].features,
catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
(1UL << max_crtc_count) - 1, 0);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@ -573,7 +582,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
}
priv->planes[priv->num_planes++] = plane;
if (primary)
if (type == DRM_PLANE_TYPE_CURSOR)
cursor_planes[cursor_planes_idx++] = plane;
else if (type == DRM_PLANE_TYPE_PRIMARY)
primary_planes[primary_planes_idx++] = plane;
}
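Cursor support turns plane creation from a primary/not-primary boolean into a three-way type choice: cursor-capable SSPPs become cursor planes until every CRTC has one, the next pipes become primaries, and whatever remains is an overlay. The selection logic lifted into a toy function (counters are combined here for brevity):

#include <stdbool.h>
#include <stdio.h>

enum plane_type { OVERLAY, PRIMARY, CURSOR };

static enum plane_type pick_type(bool cursor_capable,
				 int *cursors, int *primaries, int max_crtcs)
{
	if (cursor_capable && *cursors < max_crtcs) {
		(*cursors)++;
		return CURSOR;
	}
	if (*primaries < max_crtcs) {
		(*primaries)++;
		return PRIMARY;
	}
	return OVERLAY;
}

int main(void)
{
	/* sdm845-like order: cursor-capable DMA pipes come last */
	bool caps[] = { false, false, false, true, true };
	int cursors = 0, primaries = 0, i;

	for (i = 0; i < 5; i++)
		printf("sspp_%d -> %d\n", i,
		       pick_type(caps[i], &cursors, &primaries, 2));
	return 0;
}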
@ -581,7 +592,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
/* Create one CRTC per encoder */
for (i = 0; i < max_crtc_count; i++) {
crtc = dpu_crtc_init(dev, primary_planes[i]);
crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
goto fail;
@ -956,8 +967,7 @@ static void dpu_kms_handle_power_event(u32 event_type, void *usr)
if (!dpu_kms)
return;
if (event_type == DPU_POWER_EVENT_POST_ENABLE)
dpu_vbif_init_memtypes(dpu_kms);
dpu_vbif_init_memtypes(dpu_kms);
}
static int dpu_kms_hw_init(struct msm_kms *kms)
@ -1144,10 +1154,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
/*
* Handle (re)initializations during power enable
*/
dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
dpu_kms->power_event = dpu_power_handle_register_event(
&dpu_kms->phandle,
DPU_POWER_EVENT_POST_ENABLE,
&dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
dpu_kms_handle_power_event, dpu_kms, "kms");
pm_runtime_put_sync(&dpu_kms->pdev->dev);


@ -158,6 +158,8 @@ static void dpu_mdss_destroy(struct drm_device *dev)
_dpu_mdss_irq_domain_fini(dpu_mdss);
free_irq(platform_get_irq(pdev, 0), dpu_mdss);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
@ -215,7 +217,7 @@ int dpu_mdss_init(struct drm_device *dev)
if (ret)
goto irq_domain_error;
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
ret = request_irq(platform_get_irq(pdev, 0),
dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
if (ret) {
DPU_ERROR("failed to init irq: %d\n", ret);


@ -125,26 +125,11 @@ struct dpu_plane {
static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv;
struct msm_drm_private *priv = plane->dev->dev_private;
if (!plane || !plane->dev)
return NULL;
priv = plane->dev->dev_private;
if (!priv)
return NULL;
return to_dpu_kms(priv->kms);
}
static bool dpu_plane_enabled(struct drm_plane_state *state)
{
return state && state->fb && state->crtc;
}
static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
{
return state && state->crtc;
}
/**
* _dpu_plane_calc_fill_level - calculate fill level of the given source format
* @plane: Pointer to drm plane
@ -160,7 +145,7 @@ static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
u32 fixed_buff_size;
u32 total_fl;
if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
if (!fmt || !plane->state || !src_width || !fmt->bpp) {
DPU_ERROR("invalid arguments\n");
return 0;
}
@ -170,7 +155,7 @@ static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
if (!dpu_plane_enabled(tmp->base.state))
if (!tmp->base.state->visible)
continue;
DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
pdpu->base.base.id, tmp->base.base.id,
@ -241,26 +226,11 @@ static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct dpu_plane *pdpu;
struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
u64 qos_lut;
u32 total_fl = 0, lut_usage;
if (!plane || !fb) {
DPU_ERROR("invalid arguments plane %d fb %d\n",
plane != 0, fb != 0);
return;
}
pdpu = to_dpu_plane(plane);
if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
DPU_ERROR("invalid arguments\n");
return;
} else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
return;
}
if (!pdpu->is_rt_pipe) {
lut_usage = DPU_QOS_LUT_USAGE_NRT;
} else {
@ -302,24 +272,10 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
struct dpu_plane *pdpu;
struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
u32 danger_lut, safe_lut;
if (!plane || !fb) {
DPU_ERROR("invalid arguments\n");
return;
}
pdpu = to_dpu_plane(plane);
if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
DPU_ERROR("invalid arguments\n");
return;
} else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
return;
}
if (!pdpu->is_rt_pipe) {
danger_lut = pdpu->catalog->perf.danger_lut_tbl
[DPU_QOS_LUT_USAGE_NRT];
@ -373,21 +329,7 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
bool enable, u32 flags)
{
struct dpu_plane *pdpu;
if (!plane) {
DPU_ERROR("invalid arguments\n");
return;
}
pdpu = to_dpu_plane(plane);
if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
DPU_ERROR("invalid arguments\n");
return;
} else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
return;
}
struct dpu_plane *pdpu = to_dpu_plane(plane);
if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
@ -423,35 +365,17 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
&pdpu->pipe_qos_cfg);
}
int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
{
struct dpu_plane *pdpu;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
if (!plane || !plane->dev) {
DPU_ERROR("invalid arguments\n");
return -EINVAL;
}
priv = plane->dev->dev_private;
if (!priv || !priv->kms) {
DPU_ERROR("invalid KMS reference\n");
return -EINVAL;
}
dpu_kms = to_dpu_kms(priv->kms);
pdpu = to_dpu_plane(plane);
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
if (!pdpu->is_rt_pipe)
goto end;
return;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
end:
return 0;
}
/**
@ -462,29 +386,9 @@ int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
struct drm_crtc *crtc)
{
struct dpu_plane *pdpu;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_ot_params ot_params;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
if (!plane || !plane->dev || !crtc) {
DPU_ERROR("invalid arguments plane %d crtc %d\n",
plane != 0, crtc != 0);
return;
}
priv = plane->dev->dev_private;
if (!priv || !priv->kms) {
DPU_ERROR("invalid KMS reference\n");
return;
}
dpu_kms = to_dpu_kms(priv->kms);
pdpu = to_dpu_plane(plane);
if (!pdpu->pipe_hw) {
DPU_ERROR("invalid pipe reference\n");
return;
}
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
@ -506,28 +410,9 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
*/
static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_qos_params qos_params;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
if (!plane || !plane->dev) {
DPU_ERROR("invalid arguments\n");
return;
}
priv = plane->dev->dev_private;
if (!priv || !priv->kms) {
DPU_ERROR("invalid KMS reference\n");
return;
}
dpu_kms = to_dpu_kms(priv->kms);
pdpu = to_dpu_plane(plane);
if (!pdpu->pipe_hw) {
DPU_ERROR("invalid pipe reference\n");
return;
}
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = VBIF_RT;
@ -548,27 +433,12 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
/**
* _dpu_plane_get_aspace: gets the address space
*/
static int _dpu_plane_get_aspace(
struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
struct msm_gem_address_space **aspace)
static inline struct msm_gem_address_space *_dpu_plane_get_aspace(
struct dpu_plane *pdpu)
{
struct dpu_kms *kms;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
if (!pdpu || !pstate || !aspace) {
DPU_ERROR("invalid parameters\n");
return -EINVAL;
}
kms = _dpu_plane_get_kms(&pdpu->base);
if (!kms) {
DPU_ERROR("invalid kms\n");
return -EINVAL;
}
*aspace = kms->base.aspace;
return 0;
return kms->base.aspace;
}
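_dpu_plane_get_aspace() shrinks from an out-parameter-plus-errno shape to an inline that simply returns kms->base.aspace, matching the rest of this file where defensive NULL checks on internal pointers are dropped. The refactor in isolation, with a toy kms type:

#include <stdio.h>

struct kms { const char *aspace; };

/* Before: out-param plus error code for something that cannot fail. */
static int get_aspace_old(struct kms *kms, const char **out)
{
	if (!kms || !out)
		return -22;	/* -EINVAL */
	*out = kms->aspace;
	return 0;
}

/* After: direct return; the call chain guarantees a valid kms. */
static const char *get_aspace_new(struct kms *kms)
{
	return kms->aspace;
}

int main(void)
{
	struct kms k = { "gpu-aspace" };
	const char *a;

	if (!get_aspace_old(&k, &a))
		printf("old: %s\n", a);
	printf("new: %s\n", get_aspace_new(&k));
	return 0;
}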
static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
@ -576,29 +446,10 @@ static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
struct dpu_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
{
struct dpu_plane *pdpu;
struct msm_gem_address_space *aspace = NULL;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
int ret;
if (!plane || !pstate || !pipe_cfg || !fb) {
DPU_ERROR(
"invalid arg(s), plane %d state %d cfg %d fb %d\n",
plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
return;
}
pdpu = to_dpu_plane(plane);
if (!pdpu->pipe_hw) {
DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
return;
}
ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
if (ret) {
DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
return;
}
ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
if (ret == -EAGAIN)
DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
@ -622,15 +473,6 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
{
uint32_t i;
if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
!chroma_subsmpl_v) {
DPU_ERROR(
"pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
!!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
chroma_subsmpl_v);
return;
}
memset(scale_cfg, 0, sizeof(*scale_cfg));
memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
@ -734,17 +576,8 @@ static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
const struct dpu_format *fmt, bool color_fill)
{
struct dpu_hw_pixel_ext *pe;
uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
if (!pdpu || !fmt || !pstate) {
DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
pdpu != 0, fmt != 0, pstate != 0);
return;
}
pe = &pstate->pixel_ext;
/* don't chroma subsample if decimating */
chroma_subsmpl_h =
drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
@ -772,21 +605,8 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
uint32_t color, uint32_t alpha)
{
const struct dpu_format *fmt;
const struct drm_plane *plane;
struct dpu_plane_state *pstate;
if (!pdpu || !pdpu->base.state) {
DPU_ERROR("invalid plane\n");
return -EINVAL;
}
if (!pdpu->pipe_hw) {
DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
return -EINVAL;
}
plane = &pdpu->base;
pstate = to_dpu_plane_state(plane->state);
const struct drm_plane *plane = &pdpu->base;
struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
DPU_DEBUG_PLANE(pdpu, "\n");
@ -837,12 +657,7 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
{
struct dpu_plane_state *pstate;
if (!drm_state)
return;
pstate = to_dpu_plane_state(drm_state);
struct dpu_plane_state *pstate = to_dpu_plane_state(drm_state);
pstate->multirect_index = DPU_SSPP_RECT_SOLO;
pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
@ -973,15 +788,6 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
u32 *flush_sspp)
{
struct dpu_plane_state *pstate;
if (!plane || !flush_sspp) {
DPU_ERROR("invalid parameters\n");
return;
}
pstate = to_dpu_plane_state(plane->state);
*flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
}
@ -995,7 +801,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
struct dma_fence *fence;
struct msm_gem_address_space *aspace;
struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
int ret;
if (!new_state->fb)
@ -1003,12 +809,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
if (ret) {
DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
return ret;
}
/* cache aspace */
pstate->aspace = aspace;
@ -1078,33 +878,30 @@ static bool dpu_plane_validate_src(struct drm_rect *src,
drm_rect_equals(fb_rect, src);
}
static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
int ret = 0;
struct dpu_plane *pdpu;
struct dpu_plane_state *pstate;
int ret = 0, min_scale;
struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct drm_crtc_state *crtc_state = NULL;
const struct dpu_format *fmt;
struct drm_rect src, dst, fb_rect = { 0 };
uint32_t max_upscale = 1, max_downscale = 1;
uint32_t min_src_size, max_linewidth;
int hscale = 1, vscale = 1;
if (!plane || !state) {
DPU_ERROR("invalid arg(s), plane %d state %d\n",
plane != 0, state != 0);
ret = -EINVAL;
goto exit;
}
pdpu = to_dpu_plane(plane);
pstate = to_dpu_plane_state(state);
if (!pdpu->pipe_sblk) {
DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
ret = -EINVAL;
goto exit;
if (state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state->state,
state->crtc);
min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale);
ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale,
pdpu->pipe_sblk->maxupscale << 16,
true, true);
if (ret) {
DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
return ret;
}
if (!state->visible)
return 0;
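
For reference: drm_atomic_helper_check_plane_state() takes its scaling bounds in 16.16 fixed point, where the checked ratio is src/dst as computed by drm_rect_calc_hscale()/vscale(). A standalone sketch of the arithmetic, using made-up limits rather than real catalog values:

#include <stdio.h>

#define FRAC_16_16(mult, div) (((mult) << 16) / (div))

int main(void)
{
	int min_scale = FRAC_16_16(1, 4);	/* 0x4000   == 0.25 */
	int max_scale = 20 << 16;		/* 0x140000 == 20.0 */
	int src_w = 1920, dst_w = 960;

	/* the same 16.16 ratio drm_rect_calc_hscale() would report */
	int hscale = FRAC_16_16(src_w, dst_w);	/* 0x20000 == 2.0 */

	printf("bounds [%.2f, %.2f], hscale %.2f\n", min_scale / 65536.0,
	       max_scale / 65536.0, hscale / 65536.0);
	return 0;
}
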
src.x1 = state->src_x >> 16;
src.y1 = state->src_y >> 16;
@ -1118,25 +915,6 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
if (pdpu->features & DPU_SSPP_SCALER) {
max_downscale = pdpu->pipe_sblk->maxdwnscale;
max_upscale = pdpu->pipe_sblk->maxupscale;
}
if (drm_rect_width(&src) < drm_rect_width(&dst))
hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
else
hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
if (drm_rect_height(&src) < drm_rect_height(&dst))
vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
else
vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
if (!dpu_plane_enabled(state))
goto exit;
fmt = to_dpu_format(msm_framebuffer_format(state->fb));
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
@ -1147,13 +925,13 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
| BIT(DPU_SSPP_CSC_10BIT))))) {
DPU_ERROR_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
ret = -EINVAL;
return -EINVAL;
/* check src bounds */
} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
ret = -E2BIG;
return -E2BIG;
/* valid yuv image */
} else if (DPU_FORMAT_IS_YUV(fmt) &&
@ -1162,41 +940,22 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
drm_rect_height(&src) & 0x1)) {
DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
ret = -EINVAL;
return -EINVAL;
/* min dst support */
} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&dst));
ret = -EINVAL;
return -EINVAL;
/* check decimated source width */
} else if (drm_rect_width(&src) > max_linewidth) {
DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DRM_RECT_ARG(&src), max_linewidth);
ret = -E2BIG;
/* check scaler capability */
} else if (hscale < 0 || vscale < 0) {
DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
ret = -E2BIG;
return -E2BIG;
}
exit:
return ret;
}
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
if (!state->fb)
return 0;
DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
return dpu_plane_sspp_atomic_check(plane, state);
return 0;
}
void dpu_plane_flush(struct drm_plane *plane)
@ -1245,46 +1004,16 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error)
pdpu->is_error = error;
}
static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
{
uint32_t nplanes, src_flags;
struct dpu_plane *pdpu;
struct drm_plane_state *state;
struct dpu_plane_state *pstate;
struct dpu_plane_state *old_pstate;
const struct dpu_format *fmt;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_rect src, dst;
if (!plane) {
DPU_ERROR("invalid plane\n");
return -EINVAL;
} else if (!plane->state) {
DPU_ERROR("invalid plane state\n");
return -EINVAL;
} else if (!old_state) {
DPU_ERROR("invalid old state\n");
return -EINVAL;
}
pdpu = to_dpu_plane(plane);
state = plane->state;
pstate = to_dpu_plane_state(state);
old_pstate = to_dpu_plane_state(old_state);
crtc = state->crtc;
fb = state->fb;
if (!crtc || !fb) {
DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
crtc != 0, fb != 0);
return -EINVAL;
}
fmt = to_dpu_format(msm_framebuffer_format(fb));
nplanes = fmt->num_planes;
uint32_t src_flags;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
const struct dpu_format *fmt =
to_dpu_format(msm_framebuffer_format(fb));
memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
@ -1295,28 +1024,27 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
src.x1 = state->src_x >> 16;
src.y1 = state->src_y >> 16;
src.x2 = src.x1 + (state->src_w >> 16);
src.y2 = src.y1 + (state->src_h >> 16);
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
crtc->base.id, DRM_RECT_ARG(&state->dst),
(char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
dst = drm_plane_state_dest(state);
pdpu->pipe_cfg.src_rect = state->src;
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT
", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src),
crtc->base.id, DRM_RECT_ARG(&dst),
(char *)&fmt->base.pixel_format,
DPU_FORMAT_IS_UBWC(fmt));
/* state->src is 16.16, src_rect is not */
pdpu->pipe_cfg.src_rect.x1 >>= 16;
pdpu->pipe_cfg.src_rect.x2 >>= 16;
pdpu->pipe_cfg.src_rect.y1 >>= 16;
pdpu->pipe_cfg.src_rect.y2 >>= 16;
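
state->src carries 16.16 fixed-point coordinates while dpu_hw_pipe_cfg wants whole pixels, so the shifts above simply truncate the fractional part. A tiny standalone illustration (values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t src_x1 = (64 << 16) | 0x8000;	/* 64.5 in 16.16 */

	/* the >>= 16 above keeps only the integer pixel position */
	printf("%f -> %u\n", src_x1 / 65536.0, src_x1 >> 16);	/* 64.500000 -> 64 */
	return 0;
}
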
pdpu->pipe_cfg.src_rect = src;
pdpu->pipe_cfg.dst_rect = dst;
pdpu->pipe_cfg.dst_rect = state->dst;
_dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
/* override for color fill */
if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
/* skip remaining processing on color fill */
return 0;
return;
}
if (pdpu->pipe_hw->ops.setup_rects) {
@ -1387,30 +1115,13 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
}
_dpu_plane_set_qos_remap(plane);
return 0;
}
static void _dpu_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
static void _dpu_plane_atomic_disable(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
struct drm_plane_state *state;
struct dpu_plane_state *pstate;
if (!plane) {
DPU_ERROR("invalid plane\n");
return;
} else if (!plane->state) {
DPU_ERROR("invalid plane state\n");
return;
} else if (!old_state) {
DPU_ERROR("invalid old state\n");
return;
}
pdpu = to_dpu_plane(plane);
state = plane->state;
pstate = to_dpu_plane_state(state);
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
pstate->multirect_mode);
@ -1426,31 +1137,17 @@ static void _dpu_plane_atomic_disable(struct drm_plane *plane,
static void dpu_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct dpu_plane *pdpu;
struct drm_plane_state *state;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
if (!plane) {
DPU_ERROR("invalid plane\n");
return;
} else if (!plane->state) {
DPU_ERROR("invalid plane state\n");
return;
}
pdpu = to_dpu_plane(plane);
pdpu->is_error = false;
state = plane->state;
DPU_DEBUG_PLANE(pdpu, "\n");
if (!dpu_plane_sspp_enabled(state)) {
_dpu_plane_atomic_disable(plane, old_state);
if (!state->visible) {
_dpu_plane_atomic_disable(plane);
} else {
int ret;
ret = dpu_plane_sspp_atomic_update(plane, old_state);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
dpu_plane_sspp_atomic_update(plane);
}
}
@ -1487,8 +1184,7 @@ static void dpu_plane_destroy(struct drm_plane *plane)
/* this will destroy the states as well */
drm_plane_cleanup(plane);
if (pdpu->pipe_hw)
dpu_hw_sspp_destroy(pdpu->pipe_hw);
dpu_hw_sspp_destroy(pdpu->pipe_hw);
kfree(pdpu);
}
@ -1507,9 +1203,7 @@ static void dpu_plane_destroy_state(struct drm_plane *plane,
pstate = to_dpu_plane_state(state);
/* remove ref count for frame buffers */
if (state->fb)
drm_framebuffer_put(state->fb);
__drm_atomic_helper_plane_destroy_state(state);
kfree(pstate);
}
@ -1829,40 +1523,17 @@ bool is_dpu_plane_virtual(struct drm_plane *plane)
/* initialize plane */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, bool primary_plane,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs, u32 master_plane_id)
{
struct drm_plane *plane = NULL, *master_plane = NULL;
const struct dpu_format_extended *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv;
struct dpu_kms *kms;
enum drm_plane_type type;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
int zpos_max = DPU_ZPOS_MAX;
int ret = -EINVAL;
if (!dev) {
DPU_ERROR("[%u]device is NULL\n", pipe);
goto exit;
}
priv = dev->dev_private;
if (!priv) {
DPU_ERROR("[%u]private data is NULL\n", pipe);
goto exit;
}
if (!priv->kms) {
DPU_ERROR("[%u]invalid KMS reference\n", pipe);
goto exit;
}
kms = to_dpu_kms(priv->kms);
if (!kms->catalog) {
DPU_ERROR("[%u]invalid catalog reference\n", pipe);
goto exit;
}
/* create and zero local structure */
pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
if (!pdpu) {
@ -1918,12 +1589,6 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
if (pdpu->features & BIT(DPU_SSPP_CURSOR))
type = DRM_PLANE_TYPE_CURSOR;
else if (primary_plane)
type = DRM_PLANE_TYPE_PRIMARY;
else
type = DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
pdpu->formats, pdpu->nformats,
NULL, type, NULL);

View File

@ -122,7 +122,7 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error);
* dpu_plane_init - create new dpu plane for the given pipe
* @dev: Pointer to DRM device
* @pipe: dpu hardware pipe identifier
* @primary_plane: true if this pipe is primary plane for crtc
* @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
* @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
* a regular plane initialization. A non-zero primary plane
@ -130,7 +130,7 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error);
*
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, bool primary_plane,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs, u32 master_plane_id);
/**

View File

@ -145,6 +145,7 @@ int dpu_power_resource_enable(struct dpu_power_handle *phandle,
bool changed = false;
u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
struct dpu_power_client *client;
u32 event_type;
if (!phandle || !pclient) {
pr_err("invalid input argument\n");
@ -181,19 +182,9 @@ int dpu_power_resource_enable(struct dpu_power_handle *phandle,
if (!changed)
goto end;
if (enable) {
dpu_power_event_trigger_locked(phandle,
DPU_POWER_EVENT_PRE_ENABLE);
dpu_power_event_trigger_locked(phandle,
DPU_POWER_EVENT_POST_ENABLE);
} else {
dpu_power_event_trigger_locked(phandle,
DPU_POWER_EVENT_PRE_DISABLE);
dpu_power_event_trigger_locked(phandle,
DPU_POWER_EVENT_POST_DISABLE);
}
event_type = enable ? DPU_POWER_EVENT_ENABLE : DPU_POWER_EVENT_DISABLE;
dpu_power_event_trigger_locked(phandle, event_type);
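
With the PRE/POST pairs collapsed into two plain bits, a single trigger call covers each edge, and a client can subscribe to both with one mask. A hedged sketch, assuming dpu_power_handle_register_event() keeps the signature it has in dpu_power_handle.h; the callback body is illustrative:

static void example_power_cb(u32 event_type, void *usr)
{
	if (event_type & DPU_POWER_EVENT_ENABLE) {
		/* resources just came up, safe to touch hardware */
	} else if (event_type & DPU_POWER_EVENT_DISABLE) {
		/* power is going away, quiesce */
	}
}

/* one registration for both edges */
dpu_power_handle_register_event(phandle,
		DPU_POWER_EVENT_ENABLE | DPU_POWER_EVENT_DISABLE,
		example_power_cb, priv, "example");
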
end:
mutex_unlock(&phandle->phandle_lock);
return 0;

View File

@ -23,17 +23,9 @@
#include "dpu_io_util.h"
/* event will be triggered before power handler disable */
#define DPU_POWER_EVENT_PRE_DISABLE 0x1
/* event will be triggered after power handler disable */
#define DPU_POWER_EVENT_POST_DISABLE 0x2
/* event will be triggered before power handler enable */
#define DPU_POWER_EVENT_PRE_ENABLE 0x4
/* event will be triggered after power handler enable */
#define DPU_POWER_EVENT_POST_ENABLE 0x8
/* events will be triggered on power handler enable/disable */
#define DPU_POWER_EVENT_DISABLE BIT(0)
#define DPU_POWER_EVENT_ENABLE BIT(1)
/**
* mdss_bus_vote_type: register bus vote type

View File

@ -16,7 +16,6 @@
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
@ -25,38 +24,13 @@
#define RESERVED_BY_OTHER(h, r) \
((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
(t).num_comp_enc == (r).num_enc && \
(t).num_intf == (r).num_intf)
struct dpu_rm_topology_def {
enum dpu_rm_topology_name top_name;
int num_lm;
int num_comp_enc;
int num_intf;
int num_ctl;
int needs_split_display;
};
static const struct dpu_rm_topology_def g_top_table[] = {
{ DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
{ DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
{ DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true },
{ DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
};
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
* @top_ctrl: topology control preference from kernel client
* @top: selected topology for the display
* @topology: selected topology for the display
* @hw_res: Hardware resources required as reported by the encoders
*/
struct dpu_rm_requirements {
uint64_t top_ctrl;
const struct dpu_rm_topology_def *topology;
struct msm_display_topology topology;
struct dpu_encoder_hw_resources hw_res;
};
@ -72,13 +46,11 @@ struct dpu_rm_requirements {
* @enc_id: Reservations are tracked by Encoder DRM object ID.
* CRTCs may be connected to multiple Encoders.
* An encoder or connector id identifies the display path.
* @topology DRM<->HW topology use case
*/
struct dpu_rm_rsvp {
struct list_head list;
uint32_t seq;
uint32_t enc_id;
enum dpu_rm_topology_name topology;
};
/**
@ -122,8 +94,8 @@ static void _dpu_rm_print_rsvps(
DPU_DEBUG("%d\n", stage);
list_for_each_entry(rsvp, &rm->rsvps, list) {
DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
rsvp->enc_id, rsvp->topology);
DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq,
rsvp->enc_id);
}
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
@ -146,18 +118,6 @@ struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
return rm->hw_mdp;
}
enum dpu_rm_topology_name
dpu_rm_get_topology_name(struct msm_display_topology topology)
{
int i;
for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
return g_top_table[i].top_name;
return DPU_RM_TOPOLOGY_NONE;
}
void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
@ -229,9 +189,6 @@ static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
case DPU_HW_BLK_CTL:
dpu_hw_ctl_destroy(hw);
break;
case DPU_HW_BLK_CDM:
dpu_hw_cdm_destroy(hw);
break;
case DPU_HW_BLK_PINGPONG:
dpu_hw_pingpong_destroy(hw);
break;
@ -305,9 +262,6 @@ static int _dpu_rm_hw_blk_create(
case DPU_HW_BLK_CTL:
hw = dpu_hw_ctl_init(id, mmio, cat);
break;
case DPU_HW_BLK_CDM:
hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
break;
case DPU_HW_BLK_PINGPONG:
hw = dpu_hw_pingpong_init(id, mmio, cat);
break;
@ -438,15 +392,6 @@ int dpu_rm_init(struct dpu_rm *rm,
}
}
for (i = 0; i < cat->cdm_count; i++) {
rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
cat->cdm[i].id, &cat->cdm[i]);
if (rc) {
DPU_ERROR("failed: cdm hw not available\n");
goto fail;
}
}
return 0;
fail:
@ -455,6 +400,11 @@ int dpu_rm_init(struct dpu_rm *rm,
return rc;
}
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
return top->num_intf > 1;
}
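
Everything the old g_top_table encoded is now derived directly from msm_display_topology: one CTL per interface, and split display whenever more than one interface is involved. For instance (illustrative values):

struct msm_display_topology top = {
	.num_lm = 2, .num_enc = 0, .num_intf = 2,	/* dual-pipe on two INTFs */
};

int num_ctls = top.num_intf;			/* 2 */
bool split = _dpu_rm_needs_split_display(&top);	/* true: num_intf > 1 */
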
/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
@ -538,14 +488,14 @@ static int _dpu_rm_reserve_lms(
int lm_count = 0;
int i, rc = 0;
if (!reqs->topology->num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
if (!reqs->topology.num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
return -EINVAL;
}
/* Find a primary mixer */
dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
while (lm_count != reqs->topology->num_lm &&
while (lm_count != reqs->topology.num_lm &&
_dpu_rm_get_hw_locked(rm, &iter_i)) {
memset(&lm, 0, sizeof(lm));
memset(&pp, 0, sizeof(pp));
@ -563,7 +513,7 @@ static int _dpu_rm_reserve_lms(
/* Valid primary mixer found, find matching peers */
dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
while (lm_count != reqs->topology->num_lm &&
while (lm_count != reqs->topology.num_lm &&
_dpu_rm_get_hw_locked(rm, &iter_j)) {
if (iter_i.blk == iter_j.blk)
continue;
@ -578,7 +528,7 @@ static int _dpu_rm_reserve_lms(
}
}
if (lm_count != reqs->topology->num_lm) {
if (lm_count != reqs->topology.num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
@ -600,14 +550,20 @@ static int _dpu_rm_reserve_lms(
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp,
const struct dpu_rm_topology_def *top)
const struct msm_display_topology *top)
{
struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
struct dpu_rm_hw_iter iter;
int i = 0;
int i = 0, num_ctls = 0;
bool needs_split_display = false;
memset(&ctls, 0, sizeof(ctls));
/* each hw_intf needs its own hw_ctrl to program its control path */
num_ctls = top->num_intf;
needs_split_display = _dpu_rm_needs_split_display(top);
dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
while (_dpu_rm_get_hw_locked(rm, &iter)) {
const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
@ -621,20 +577,20 @@ static int _dpu_rm_reserve_ctls(
DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
if (top->needs_split_display != has_split_display)
if (needs_split_display != has_split_display)
continue;
ctls[i] = iter.blk;
DPU_DEBUG("ctl %d match\n", iter.blk->id);
if (++i == top->num_ctl)
if (++i == num_ctls)
break;
}
if (i != top->num_ctl)
if (i != num_ctls)
return -ENAVAIL;
for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
ctls[i]->rsvp_nxt = rsvp;
trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
rsvp->enc_id);
@ -643,55 +599,11 @@ static int _dpu_rm_reserve_ctls(
return 0;
}
static int _dpu_rm_reserve_cdm(
struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp,
uint32_t id,
enum dpu_hw_blk_type type)
{
struct dpu_rm_hw_iter iter;
DRM_DEBUG_KMS("type %d id %d\n", type, id);
dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
while (_dpu_rm_get_hw_locked(rm, &iter)) {
const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
const struct dpu_cdm_cfg *caps = cdm->caps;
bool match = false;
if (RESERVED_BY_OTHER(iter.blk, rsvp))
continue;
if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
match = test_bit(id, &caps->intf_connect);
DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
iter.blk->type, iter.blk->id, rsvp->enc_id,
caps->intf_connect, match);
if (!match)
continue;
trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
rsvp->enc_id);
iter.blk->rsvp_nxt = rsvp;
break;
}
if (!iter.hw) {
DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
return -ENAVAIL;
}
return 0;
}
static int _dpu_rm_reserve_intf(
struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp,
uint32_t id,
enum dpu_hw_blk_type type,
bool needs_cdm)
enum dpu_hw_blk_type type)
{
struct dpu_rm_hw_iter iter;
int ret = 0;
@ -719,9 +631,6 @@ static int _dpu_rm_reserve_intf(
return -EINVAL;
}
if (needs_cdm)
ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
return ret;
}
@ -738,7 +647,7 @@ static int _dpu_rm_reserve_intf_related_hw(
continue;
id = i + INTF_0;
ret = _dpu_rm_reserve_intf(rm, rsvp, id,
DPU_HW_BLK_INTF, hw_res->needs_cdm);
DPU_HW_BLK_INTF);
if (ret)
return ret;
}
@ -750,17 +659,14 @@ static int _dpu_rm_make_next_rsvp(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct dpu_rm_rsvp *rsvp,
struct dpu_rm_requirements *reqs)
{
int ret;
struct dpu_rm_topology_def topology;
/* Create reservation info, tag reserved blocks with it as we go */
rsvp->seq = ++rm->rsvp_next_seq;
rsvp->enc_id = enc->base.id;
rsvp->topology = reqs->topology->top_name;
list_add_tail(&rsvp->list, &rm->rsvps);
ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
@ -769,23 +675,12 @@ static int _dpu_rm_make_next_rsvp(
return ret;
}
/*
* Do assignment preferring to give away low-resource CTLs first:
* - Check mixers without Split Display
* - Only then allow to grab from CTLs with split display capability
*/
_dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
if (ret && !reqs->topology->needs_split_display) {
memcpy(&topology, reqs->topology, sizeof(topology));
topology.needs_split_display = true;
_dpu_rm_reserve_ctls(rm, rsvp, &topology);
}
ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
/* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
if (ret)
return ret;
@ -797,44 +692,16 @@ static int _dpu_rm_populate_requirements(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
int i;
dpu_encoder_get_hw_resources(enc, &reqs->hw_res);
memset(reqs, 0, sizeof(*reqs));
reqs->topology = req_topology;
dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
req_topology)) {
reqs->topology = &g_top_table[i];
break;
}
}
if (!reqs->topology) {
DPU_ERROR("invalid topology for the display\n");
return -EINVAL;
}
/**
* Set the requirement based on caps if not set from user space
* This will ensure to select LM tied with DS blocks
* Currently, DS blocks are tied with LM 0 and LM 1 (primary display)
*/
if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
reqs->hw_res.display_num_of_h_tiles);
DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
reqs->topology->num_lm, reqs->topology->num_ctl,
reqs->topology->top_name,
reqs->topology->needs_split_display);
DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
reqs->topology.num_lm, reqs->topology.num_enc,
reqs->topology.num_intf);
return 0;
}
@ -860,29 +727,12 @@ static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
return NULL;
}
static struct drm_connector *_dpu_rm_get_connector(
struct drm_encoder *enc)
{
struct drm_connector *conn = NULL;
struct list_head *connector_list =
&enc->dev->mode_config.connector_list;
list_for_each_entry(conn, connector_list, head)
if (conn->encoder == enc)
return conn;
return NULL;
}
/**
* _dpu_rm_release_rsvp - release resources and release a reservation
* @rm: KMS handle
* @rsvp: RSVP pointer to release and release resources for
*/
static void _dpu_rm_release_rsvp(
struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp,
struct drm_connector *conn)
static void _dpu_rm_release_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
struct dpu_rm_hw_blk *blk;
@ -923,7 +773,6 @@ static void _dpu_rm_release_rsvp(
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
struct dpu_rm_rsvp *rsvp;
struct drm_connector *conn;
if (!rm || !enc) {
DPU_ERROR("invalid params\n");
@ -938,25 +787,15 @@ void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
goto end;
}
conn = _dpu_rm_get_connector(enc);
if (!conn) {
DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
goto end;
}
_dpu_rm_release_rsvp(rm, rsvp, conn);
_dpu_rm_release_rsvp(rm, rsvp);
end:
mutex_unlock(&rm->rm_lock);
}
static int _dpu_rm_commit_rsvp(
struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp,
struct drm_connector_state *conn_state)
static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
struct dpu_rm_hw_blk *blk;
enum dpu_hw_blk_type type;
int ret = 0;
/* Swap next rsvp to be the active */
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
@ -967,19 +806,12 @@ static int _dpu_rm_commit_rsvp(
}
}
}
if (!ret)
DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
rsvp->topology);
return ret;
}
int dpu_rm_reserve(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct msm_display_topology topology,
bool test_only)
{
@ -987,25 +819,19 @@ int dpu_rm_reserve(
struct dpu_rm_requirements reqs;
int ret;
if (!rm || !enc || !crtc_state || !conn_state) {
DPU_ERROR("invalid arguments\n");
return -EINVAL;
}
/* Check if this is just a page-flip */
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
conn_state->connector->base.id, enc->base.id,
crtc_state->crtc->base.id, test_only);
DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
enc->base.id, crtc_state->crtc->base.id, test_only);
mutex_lock(&rm->rm_lock);
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
conn_state, &reqs, topology);
ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
goto end;
@ -1030,28 +856,15 @@ int dpu_rm_reserve(
rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
/*
* User can request that we clear out any reservation during the
* atomic_check phase by using this CLEAR bit
*/
if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
rsvp_cur->seq, rsvp_cur->enc_id);
_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
rsvp_cur = NULL;
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
}
/* Check the proposed reservation, store it in hw's "next" field */
ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
rsvp_nxt, &reqs);
ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, rsvp_nxt, &reqs);
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
if (ret) {
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
} else if (test_only && !RM_RQ_LOCK(&reqs)) {
_dpu_rm_release_rsvp(rm, rsvp_nxt);
} else if (test_only) {
/*
* Normally, if test_only, test the reservation and then undo
* However, if the user requests LOCK, then keep the reservation
@ -1059,15 +872,11 @@ int dpu_rm_reserve(
*/
DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
rsvp_nxt->seq, rsvp_nxt->enc_id);
_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
_dpu_rm_release_rsvp(rm, rsvp_nxt);
} else {
if (test_only && RM_RQ_LOCK(&reqs))
DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
rsvp_nxt->seq, rsvp_nxt->enc_id);
_dpu_rm_release_rsvp(rm, rsvp_cur);
_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
_dpu_rm_commit_rsvp(rm, rsvp_nxt);
}
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);

View File

@ -20,39 +20,6 @@
#include "msm_kms.h"
#include "dpu_hw_top.h"
/**
* enum dpu_rm_topology_name - HW resource use case in use by connector
* @DPU_RM_TOPOLOGY_NONE: No topology in use currently
* @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
* @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
* @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
*/
enum dpu_rm_topology_name {
DPU_RM_TOPOLOGY_NONE = 0,
DPU_RM_TOPOLOGY_SINGLEPIPE,
DPU_RM_TOPOLOGY_DUALPIPE,
DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
DPU_RM_TOPOLOGY_MAX,
};
/**
* enum dpu_rm_topology_control - HW resource use case in use by connector
* @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
* test, reserve the resources for this display.
* Normal behavior would not impact the reservation
* list during the AtomicTest phase.
* @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
* release any reservation held by this display.
* Normal behavior would not impact the
* reservation list during the AtomicTest phase.
* @DPU_RM_TOPCTL_DS : Require layer mixers with DS capabilities
*/
enum dpu_rm_topology_control {
DPU_RM_TOPCTL_RESERVE_LOCK,
DPU_RM_TOPCTL_RESERVE_CLEAR,
DPU_RM_TOPCTL_DS,
};
/**
* struct dpu_rm - DPU dynamic hardware resource manager
* @dev: device handle for event logging purposes
@ -125,7 +92,6 @@ int dpu_rm_destroy(struct dpu_rm *rm);
* @rm: DPU Resource Manager handle
* @drm_enc: DRM Encoder handle
* @crtc_state: Proposed Atomic DRM CRTC State handle
* @conn_state: Proposed Atomic DRM Connector State handle
* @topology: Pointer to topology info for the display
* @test_only: Atomic-Test phase, discard results (unless property overrides)
* @Return: 0 on Success otherwise -ERROR
@ -133,7 +99,6 @@ int dpu_rm_destroy(struct dpu_rm *rm);
int dpu_rm_reserve(struct dpu_rm *rm,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct msm_display_topology topology,
bool test_only);
@ -187,13 +152,4 @@ bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
*/
int dpu_rm_check_property_topctl(uint64_t val);
/**
* dpu_rm_get_topology_name - returns the name of the given topology
* definition
* @topology: topology definition
* @Return: name of the topology
*/
enum dpu_rm_topology_name
dpu_rm_get_topology_name(struct msm_display_topology topology);
#endif /* __DPU_RM_H__ */

View File

@ -468,14 +468,16 @@ TRACE_EVENT(dpu_enc_frame_done_cb,
TRACE_EVENT(dpu_enc_trigger_flush,
TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
int pending_kickoff_cnt, int ctl_idx, u32 extra_flush_bits,
u32 pending_flush_ret),
TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
pending_flush_ret),
extra_flush_bits, pending_flush_ret),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( enum dpu_intf, intf_idx )
__field( int, pending_kickoff_cnt )
__field( int, ctl_idx )
__field( u32, extra_flush_bits )
__field( u32, pending_flush_ret )
),
TP_fast_assign(
@ -483,12 +485,14 @@ TRACE_EVENT(dpu_enc_trigger_flush,
__entry->intf_idx = intf_idx;
__entry->pending_kickoff_cnt = pending_kickoff_cnt;
__entry->ctl_idx = ctl_idx;
__entry->extra_flush_bits = extra_flush_bits;
__entry->pending_flush_ret = pending_flush_ret;
),
TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
"pending_flush_ret=%u", __entry->drm_id,
__entry->intf_idx, __entry->pending_kickoff_cnt,
__entry->ctl_idx, __entry->pending_flush_ret)
"extra_flush_bits=0x%x pending_flush_ret=0x%x",
__entry->drm_id, __entry->intf_idx,
__entry->pending_kickoff_cnt, __entry->ctl_idx,
__entry->extra_flush_bits, __entry->pending_flush_ret)
);
DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
@ -682,37 +686,41 @@ TRACE_EVENT(dpu_crtc_setup_mixer,
TP_STRUCT__entry(
__field( uint32_t, crtc_id )
__field( uint32_t, plane_id )
__field( struct drm_plane_state*,state )
__field( struct dpu_plane_state*,pstate )
__field( uint32_t, fb_id )
__field_struct( struct drm_rect, src_rect )
__field_struct( struct drm_rect, dst_rect )
__field( uint32_t, stage_idx )
__field( enum dpu_stage, stage )
__field( enum dpu_sspp, sspp )
__field( uint32_t, multirect_idx )
__field( uint32_t, multirect_mode )
__field( uint32_t, pixel_format )
__field( uint64_t, modifier )
),
TP_fast_assign(
__entry->crtc_id = crtc_id;
__entry->plane_id = plane_id;
__entry->state = state;
__entry->pstate = pstate;
__entry->fb_id = state ? state->fb->base.id : 0;
__entry->src_rect = drm_plane_state_src(state);
__entry->dst_rect = drm_plane_state_dest(state);
__entry->stage_idx = stage_idx;
__entry->stage = pstate->stage;
__entry->sspp = sspp;
__entry->multirect_idx = pstate->multirect_index;
__entry->multirect_mode = pstate->multirect_mode;
__entry->pixel_format = pixel_format;
__entry->modifier = modifier;
),
TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
"dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:" DRM_RECT_FP_FMT
" dst:" DRM_RECT_FMT " stage_idx:%u stage:%d, sspp:%d "
"multirect_index:%d multirect_mode:%u pix_format:%u "
"modifier:%llu",
__entry->crtc_id, __entry->plane_id,
__entry->state->fb ? __entry->state->fb->base.id : -1,
__entry->state->src_w >> 16, __entry->state->src_h >> 16,
__entry->state->src_x >> 16, __entry->state->src_y >> 16,
__entry->state->crtc_w, __entry->state->crtc_h,
__entry->state->crtc_x, __entry->state->crtc_y,
__entry->stage_idx, __entry->pstate->stage, __entry->sspp,
__entry->pstate->multirect_index,
__entry->pstate->multirect_mode, __entry->pixel_format,
__entry->modifier)
__entry->crtc_id, __entry->plane_id, __entry->fb_id,
DRM_RECT_FP_ARG(&__entry->src_rect),
DRM_RECT_ARG(&__entry->dst_rect),
__entry->stage_idx, __entry->stage, __entry->sspp,
__entry->multirect_idx, __entry->multirect_mode,
__entry->pixel_format, __entry->modifier)
);
TRACE_EVENT(dpu_crtc_setup_lm_bounds,
@ -721,15 +729,15 @@ TRACE_EVENT(dpu_crtc_setup_lm_bounds,
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( int, mixer )
__field( struct drm_rect *, bounds )
__field_struct( struct drm_rect, bounds )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->mixer = mixer;
__entry->bounds = bounds;
__entry->bounds = *bounds;
),
TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
__entry->mixer, DRM_RECT_ARG(__entry->bounds))
__entry->mixer, DRM_RECT_ARG(&__entry->bounds))
);
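
Trace entries are read back from the ring buffer long after TP_fast_assign() ran, so storing a raw pointer and dereferencing it in TP_printk() can chase freed or reused memory; __field_struct() snapshots the data into the entry at trace time instead. A minimal sketch of the pattern (the event and field names are invented):

TRACE_EVENT(example_rect,
	TP_PROTO(const struct drm_rect *r),
	TP_ARGS(r),
	TP_STRUCT__entry(
		__field_struct(	struct drm_rect, r )	/* deep copy, no pointer */
	),
	TP_fast_assign(
		__entry->r = *r;	/* copied while the rect is still valid */
	),
	TP_printk(DRM_RECT_FMT, DRM_RECT_ARG(&__entry->r))
);
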
TRACE_EVENT(dpu_crtc_vblank_enable,
@ -740,21 +748,25 @@ TRACE_EVENT(dpu_crtc_vblank_enable,
__field( uint32_t, drm_id )
__field( uint32_t, enc_id )
__field( bool, enable )
__field( struct dpu_crtc *, crtc )
__field( bool, enabled )
__field( bool, suspend )
__field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enc_id = enc_id;
__entry->enable = enable;
__entry->crtc = crtc;
__entry->enabled = crtc->enabled;
__entry->suspend = crtc->suspend;
__entry->vblank_requested = crtc->vblank_requested;
),
TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
"vblank_req:%s}",
__entry->drm_id, __entry->enc_id,
__entry->enable ? "true" : "false",
__entry->crtc->enabled ? "true" : "false",
__entry->crtc->suspend ? "true" : "false",
__entry->crtc->vblank_requested ? "true" : "false")
__entry->enabled ? "true" : "false",
__entry->suspend ? "true" : "false",
__entry->vblank_requested ? "true" : "false")
);
DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
@ -763,18 +775,22 @@ DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( bool, enable )
__field( struct dpu_crtc *, crtc )
__field( bool, enabled )
__field( bool, suspend )
__field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enable = enable;
__entry->crtc = crtc;
__entry->enabled = crtc->enabled;
__entry->suspend = crtc->suspend;
__entry->vblank_requested = crtc->vblank_requested;
),
TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
__entry->drm_id, __entry->enable ? "true" : "false",
__entry->crtc->enabled ? "true" : "false",
__entry->crtc->suspend ? "true" : "false",
__entry->crtc->vblank_requested ? "true" : "false")
__entry->enabled ? "true" : "false",
__entry->suspend ? "true" : "false",
__entry->vblank_requested ? "true" : "false")
);
DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
@ -814,24 +830,24 @@ TRACE_EVENT(dpu_plane_set_scanout,
TP_ARGS(index, layout, multirect_index),
TP_STRUCT__entry(
__field( enum dpu_sspp, index )
__field( struct dpu_hw_fmt_layout*, layout )
__field_struct( struct dpu_hw_fmt_layout, layout )
__field( enum dpu_sspp_multirect_index, multirect_index)
),
TP_fast_assign(
__entry->index = index;
__entry->layout = layout;
__entry->layout = *layout;
__entry->multirect_index = multirect_index;
),
TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
"multirect_index:%d", __entry->index, __entry->layout->width,
__entry->layout->height, __entry->layout->plane_addr[0],
__entry->layout->plane_size[0],
__entry->layout->plane_addr[1],
__entry->layout->plane_size[1],
__entry->layout->plane_addr[2],
__entry->layout->plane_size[2],
__entry->layout->plane_addr[3],
__entry->layout->plane_size[3], __entry->multirect_index)
"multirect_index:%d", __entry->index, __entry->layout.width,
__entry->layout.height, __entry->layout.plane_addr[0],
__entry->layout.plane_size[0],
__entry->layout.plane_addr[1],
__entry->layout.plane_size[1],
__entry->layout.plane_addr[2],
__entry->layout.plane_size[2],
__entry->layout.plane_addr[3],
__entry->layout.plane_size[3], __entry->multirect_index)
);
TRACE_EVENT(dpu_plane_disable,
@ -868,10 +884,6 @@ DECLARE_EVENT_CLASS(dpu_rm_iter_template,
TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
__entry->enc_id)
);
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
TP_ARGS(id, type, enc_id)
);
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
TP_ARGS(id, type, enc_id)
@ -979,16 +991,16 @@ TRACE_EVENT(dpu_core_perf_update_clk,
TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
TP_ARGS(dev, stop_req, clk_rate),
TP_STRUCT__entry(
__field( struct drm_device *, dev )
__string( dev_name, dev->unique )
__field( bool, stop_req )
__field( u64, clk_rate )
),
TP_fast_assign(
__entry->dev = dev;
__assign_str(dev_name, dev->unique);
__entry->stop_req = stop_req;
__entry->clk_rate = clk_rate;
),
TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
TP_printk("dev:%s stop_req:%s clk_rate:%llu", __get_str(dev_name),
__entry->stop_req ? "true" : "false", __entry->clk_rate)
);
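
The string case above follows the same reasoning: dev->unique is only borrowed, so the event snapshots it into the entry with __string()/__assign_str() and formats it later via __get_str(), rather than keeping a drm_device pointer alive inside the ring buffer.
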

View File

@ -185,7 +185,7 @@ static void mdp5_plane_reset(struct drm_plane *plane)
struct mdp5_plane_state *mdp5_state;
if (plane->state && plane->state->fb)
drm_framebuffer_unreference(plane->state->fb);
drm_framebuffer_put(plane->state->fb);
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
@ -228,7 +228,7 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane,
struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
if (state->fb)
drm_framebuffer_unreference(state->fb);
drm_framebuffer_put(state->fb);
kfree(pstate);
}
@ -259,7 +259,6 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
msm_framebuffer_cleanup(fb, kms->aspace);
}
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
struct drm_plane_state *state)
{

View File

@ -83,6 +83,7 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev)
return ERR_PTR(-ENOMEM);
DBG("dsi probed=%p", msm_dsi);
msm_dsi->id = -1;
msm_dsi->pdev = pdev;
platform_set_drvdata(pdev, msm_dsi);
@ -117,8 +118,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
DBG("");
msm_dsi = dsi_init(pdev);
if (IS_ERR(msm_dsi))
return PTR_ERR(msm_dsi);
if (IS_ERR(msm_dsi)) {
/* Don't fail the bind if the dsi port is not connected */
if (PTR_ERR(msm_dsi) == -ENODEV)
return 0;
else
return PTR_ERR(msm_dsi);
}
priv->dsi[msm_dsi->id] = msm_dsi;

View File

@ -1750,6 +1750,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
if (ret) {
dev_err(dev, "%s: invalid lane configuration %d\n",
__func__, ret);
ret = -EINVAL;
goto err;
}
@ -1757,6 +1758,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
dev_dbg(dev, "%s: no valid device\n", __func__);
ret = -ENODEV;
goto err;
}

View File

@ -839,6 +839,8 @@ void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
if (msm_dsi->host)
msm_dsi_host_unregister(msm_dsi->host);
msm_dsim->dsi[msm_dsi->id] = NULL;
if (msm_dsi->id >= 0)
msm_dsim->dsi[msm_dsi->id] = NULL;
}

View File

@ -337,7 +337,7 @@ static int msm_drm_uninit(struct device *dev)
mdss->funcs->destroy(ddev);
ddev->dev_private = NULL;
drm_dev_unref(ddev);
drm_dev_put(ddev);
kfree(priv);
@ -452,7 +452,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
goto err_unref_drm_dev;
goto err_put_drm_dev;
}
ddev->dev_private = priv;
@ -653,8 +653,8 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
mdss->funcs->destroy(ddev);
err_free_priv:
kfree(priv);
err_unref_drm_dev:
drm_dev_unref(ddev);
err_put_drm_dev:
drm_dev_put(ddev);
return ret;
}

View File

@ -62,6 +62,8 @@ struct msm_gem_vma;
#define MAX_BRIDGES 8
#define MAX_CONNECTORS 8
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;

View File

@ -144,7 +144,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
goto out_unlock;
}
drm_gem_object_reference(obj);
drm_gem_object_get(obj);
submit->bos[i].obj = msm_obj;
@ -396,7 +396,7 @@ static void submit_cleanup(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i, false);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
drm_gem_object_put(&msm_obj->base);
}
ww_acquire_fini(&submit->ticket);

View File

@ -41,7 +41,11 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
if (IS_ERR(opp))
return PTR_ERR(opp);
clk_set_rate(gpu->core_clk, *freq);
if (gpu->funcs->gpu_set_freq)
gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
else
clk_set_rate(gpu->core_clk, *freq);
dev_pm_opp_put(opp);
return 0;
@ -51,16 +55,14 @@ static int msm_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
u64 cycles;
u32 freq = ((u32) status->current_frequency) / 1000000;
ktime_t time;
status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
gpu->funcs->gpu_busy(gpu, &cycles);
if (gpu->funcs->gpu_get_freq)
status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
else
status->current_frequency = clk_get_rate(gpu->core_clk);
status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;
gpu->devfreq.busy_cycles = cycles;
status->busy_time = gpu->funcs->gpu_busy(gpu);
time = ktime_get();
status->total_time = ktime_us_delta(time, gpu->devfreq.time);
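
gpu_busy() now hands back accumulated busy time directly, and the optional gpu_get_freq()/gpu_set_freq() hooks let a GMU-managed GPU (a6xx) bypass core_clk. For context, the devfreq governor turns this status into a load figure, roughly (sketch, not driver code):

struct devfreq_dev_status st = {
	.busy_time  = 7500,	/* e.g. time busy since the last poll */
	.total_time = 10000,	/* time elapsed since the last poll */
};

/* a simple_ondemand-style governor compares this ~75% load
 * against its thresholds to step the frequency up or down */
unsigned long load_pct = st.busy_time * 100 / st.total_time;
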
@ -73,7 +75,10 @@ static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
*freq = (unsigned long) clk_get_rate(gpu->core_clk);
if (gpu->funcs->gpu_get_freq)
*freq = gpu->funcs->gpu_get_freq(gpu);
else
*freq = clk_get_rate(gpu->core_clk);
return 0;
}
@ -88,7 +93,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
static void msm_devfreq_init(struct msm_gpu *gpu)
{
/* We need target support to do devfreq */
if (!gpu->funcs->gpu_busy || !gpu->core_clk)
if (!gpu->funcs->gpu_busy)
return;
msm_devfreq_profile.initial_freq = gpu->fast_rate;
@ -105,6 +110,8 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
gpu->devfreq.devfreq = NULL;
}
devfreq_suspend_device(gpu->devfreq.devfreq);
}
static int enable_pwrrail(struct msm_gpu *gpu)
@ -184,6 +191,14 @@ static int disable_axi(struct msm_gpu *gpu)
return 0;
}
void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
{
gpu->devfreq.busy_cycles = 0;
gpu->devfreq.time = ktime_get();
devfreq_resume_device(gpu->devfreq.devfreq);
}
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
int ret;
@ -202,12 +217,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
if (gpu->devfreq.devfreq) {
gpu->devfreq.busy_cycles = 0;
gpu->devfreq.time = ktime_get();
devfreq_resume_device(gpu->devfreq.devfreq);
}
msm_gpu_resume_devfreq(gpu);
gpu->needs_hw_init = true;
@ -220,8 +230,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
DBG("%s", gpu->name);
if (gpu->devfreq.devfreq)
devfreq_suspend_device(gpu->devfreq.devfreq);
devfreq_suspend_device(gpu->devfreq.devfreq);
ret = disable_axi(gpu);
if (ret)
@ -367,8 +376,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, char *comm,
char *cmd)
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

View File

@ -70,9 +70,11 @@ struct msm_gpu_funcs {
/* for generation specific debugfs: */
int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
unsigned long (*gpu_busy)(struct msm_gpu *gpu);
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
int (*gpu_state_put)(struct msm_gpu_state *state);
unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
};
struct msm_gpu {
@ -264,6 +266,7 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
void msm_gpu_resume_devfreq(struct msm_gpu *gpu);
int msm_gpu_hw_init(struct msm_gpu *gpu);

View File

@ -366,7 +366,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
va_list args;
va_start(args, fmt);
n = vsnprintf(msg, sizeof(msg), fmt, args);
n = vscnprintf(msg, sizeof(msg), fmt, args);
va_end(args);
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
@ -375,11 +375,11 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
TASK_COMM_LEN, task->comm,
pid_nr(submit->pid), submit->seqno);
} else {
n = snprintf(msg, sizeof(msg), "???/%d: fence=%u",
n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u",
pid_nr(submit->pid), submit->seqno);
}
rcu_read_unlock();
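
The vsnprintf() -> vscnprintf() switch matters because n is then fed to rd_write_section(..., ALIGN(n, 4)): on truncation vsnprintf() returns the length the output would have had, which can exceed the buffer, while vscnprintf() returns what was actually stored. A userspace analogue of the failure mode (scnprintf() itself is kernel-only):

#include <stdio.h>

int main(void)
{
	char buf[8];
	int n = snprintf(buf, sizeof(buf), "hello world!");

	/* prints: n=12 buf="hello w" -- n overruns sizeof(buf) */
	printf("n=%d buf=\"%s\"\n", n, buf);
	return 0;
}
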