2013-07-19 23:59:32 +07:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2013 Red Hat
|
|
|
|
* Author: Rob Clark <robdclark@gmail.com>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License version 2 as published by
|
|
|
|
* the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
|
|
* more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along with
|
|
|
|
* this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "msm_gpu.h"
|
|
|
|
#include "msm_gem.h"
|
2013-11-17 00:56:06 +07:00
|
|
|
#include "msm_mmu.h"
|
2016-03-16 02:35:08 +07:00
|
|
|
#include "msm_fence.h"
|
2018-11-02 22:25:21 +07:00
|
|
|
#include "msm_gpu_trace.h"
|
2018-11-15 05:08:04 +07:00
|
|
|
#include "adreno/adreno_gpu.h"
|
2013-07-19 23:59:32 +07:00
|
|
|
|
2018-07-24 23:33:27 +07:00
|
|
|
#include <generated/utsrelease.h>
|
2017-09-13 21:17:18 +07:00
|
|
|
#include <linux/string_helpers.h>
|
2018-01-11 00:41:54 +07:00
|
|
|
#include <linux/pm_opp.h>
|
|
|
|
#include <linux/devfreq.h>
|
2018-07-24 23:33:27 +07:00
|
|
|
#include <linux/devcoredump.h>
|
2013-07-19 23:59:32 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Power Management:
|
|
|
|
*/
|
|
|
|
|
2018-01-11 00:41:54 +07:00
|
|
|
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
|
|
|
|
u32 flags)
|
|
|
|
{
|
|
|
|
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
|
|
|
|
struct dev_pm_opp *opp;
|
|
|
|
|
|
|
|
opp = devfreq_recommended_opp(dev, freq, flags);
|
|
|
|
|
|
|
|
if (IS_ERR(opp))
|
|
|
|
return PTR_ERR(opp);
|
|
|
|
|
2018-10-04 16:41:42 +07:00
|
|
|
if (gpu->funcs->gpu_set_freq)
|
|
|
|
gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
|
|
|
|
else
|
|
|
|
clk_set_rate(gpu->core_clk, *freq);
|
|
|
|
|
2018-01-11 00:41:54 +07:00
|
|
|
dev_pm_opp_put(opp);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int msm_devfreq_get_dev_status(struct device *dev,
|
|
|
|
struct devfreq_dev_status *status)
|
|
|
|
{
|
|
|
|
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
|
|
|
|
ktime_t time;
|
|
|
|
|
2018-10-04 16:41:42 +07:00
|
|
|
if (gpu->funcs->gpu_get_freq)
|
|
|
|
status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
|
|
|
|
else
|
|
|
|
status->current_frequency = clk_get_rate(gpu->core_clk);
|
2018-01-11 00:41:54 +07:00
|
|
|
|
2018-10-04 16:41:42 +07:00
|
|
|
status->busy_time = gpu->funcs->gpu_busy(gpu);
|
2018-01-11 00:41:54 +07:00
|
|
|
|
|
|
|
time = ktime_get();
|
|
|
|
status->total_time = ktime_us_delta(time, gpu->devfreq.time);
|
|
|
|
gpu->devfreq.time = time;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
|
|
|
|
{
|
|
|
|
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
|
|
|
|
|
2018-10-04 16:41:42 +07:00
|
|
|
if (gpu->funcs->gpu_get_freq)
|
|
|
|
*freq = gpu->funcs->gpu_get_freq(gpu);
|
|
|
|
else
|
|
|
|
*freq = clk_get_rate(gpu->core_clk);
|
2018-01-11 00:41:54 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * devfreq profile shared by all msm GPUs: sample utilization every 10ms via
 * the callbacks above.  .initial_freq is filled in at runtime by
 * msm_devfreq_init() from gpu->fast_rate.
 */
static struct devfreq_dev_profile msm_devfreq_profile = {
	.polling_ms = 10,
	.target = msm_devfreq_target,
	.get_dev_status = msm_devfreq_get_dev_status,
	.get_cur_freq = msm_devfreq_get_cur_freq,
};
|
|
|
|
|
|
|
|
static void msm_devfreq_init(struct msm_gpu *gpu)
|
|
|
|
{
|
|
|
|
/* We need target support to do devfreq */
|
2018-10-04 16:41:42 +07:00
|
|
|
if (!gpu->funcs->gpu_busy)
|
2018-01-11 00:41:54 +07:00
|
|
|
return;
|
|
|
|
|
|
|
|
msm_devfreq_profile.initial_freq = gpu->fast_rate;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't set the freq_table or max_state and let devfreq build the table
|
|
|
|
* from OPP
|
|
|
|
*/
|
|
|
|
|
|
|
|
gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
|
|
|
|
&msm_devfreq_profile, "simple_ondemand", NULL);
|
|
|
|
|
|
|
|
if (IS_ERR(gpu->devfreq.devfreq)) {
|
2018-10-21 00:49:26 +07:00
|
|
|
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
|
2018-01-11 00:41:54 +07:00
|
|
|
gpu->devfreq.devfreq = NULL;
|
|
|
|
}
|
2018-10-04 16:41:40 +07:00
|
|
|
|
|
|
|
devfreq_suspend_device(gpu->devfreq.devfreq);
|
2018-01-11 00:41:54 +07:00
|
|
|
}
|
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
/*
 * Enable the (optional) GPU power rails: the main rail first, then the
 * cx domain rail.  The order matters for power sequencing, so keep it.
 *
 * Returns 0 on success or the regulator_enable() error; on a cx failure
 * the main rail is left enabled (caller recovers via suspend path).
 */
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Disable the GPU power rails in the reverse order of enable_pwrrail():
 * cx domain first, then the main rail.  Always succeeds.
 */
static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Program the core and RBBM timer clock rates, then prepare+enable the
 * whole clock group.  Rates are set before enable so the clocks come up
 * at the intended speed.
 */
static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2Mhz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}
|
|
|
|
|
|
|
|
/*
 * Disable+unprepare the clock group, then park the core and RBBM timer
 * clocks at minimal rates.  Always returns 0.
 */
static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}
|
|
|
|
|
|
|
|
/* Enable the (optional) ebi1/AXI interface clock.  Always returns 0. */
static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	return 0;
}
|
|
|
|
|
|
|
|
/* Disable the (optional) ebi1/AXI interface clock.  Always returns 0. */
static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}
|
|
|
|
|
2018-10-04 16:41:42 +07:00
|
|
|
void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
|
|
|
|
{
|
|
|
|
gpu->devfreq.busy_cycles = 0;
|
|
|
|
gpu->devfreq.time = ktime_get();
|
|
|
|
|
|
|
|
devfreq_resume_device(gpu->devfreq.devfreq);
|
|
|
|
}
|
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
/*
 * Power the GPU up: rails first, then clocks, then the AXI bus clock —
 * the order is the reverse of msm_gpu_pm_suspend().  Also restarts
 * devfreq and flags the hardware for re-init on next use (see
 * msm_gpu_hw_init()).
 *
 * Returns 0 on success or the first failing step's error code; earlier
 * steps are left enabled on failure.
 */
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	/* Hardware must be reprogrammed before the next submission */
	gpu->needs_hw_init = true;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Power the GPU down in the reverse order of msm_gpu_pm_resume():
 * stop devfreq polling first, then AXI, clocks, and finally the rails.
 *
 * Returns 0 on success or the first failing step's error code.
 */
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}
|
|
|
|
|
2017-02-11 03:36:33 +07:00
|
|
|
int msm_gpu_hw_init(struct msm_gpu *gpu)
|
2014-01-12 04:25:08 +07:00
|
|
|
{
|
2017-02-11 03:36:33 +07:00
|
|
|
int ret;
|
2014-01-12 04:25:08 +07:00
|
|
|
|
2017-06-13 20:15:36 +07:00
|
|
|
WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
|
|
|
|
|
2017-02-11 03:36:33 +07:00
|
|
|
if (!gpu->needs_hw_init)
|
|
|
|
return 0;
|
2014-01-12 04:25:08 +07:00
|
|
|
|
2017-02-11 03:36:33 +07:00
|
|
|
disable_irq(gpu->irq);
|
|
|
|
ret = gpu->funcs->hw_init(gpu);
|
|
|
|
if (!ret)
|
|
|
|
gpu->needs_hw_init = false;
|
|
|
|
enable_irq(gpu->irq);
|
2014-01-12 04:25:08 +07:00
|
|
|
|
2017-02-11 03:36:33 +07:00
|
|
|
return ret;
|
2014-01-12 04:25:08 +07:00
|
|
|
}
|
|
|
|
|
2018-07-24 23:33:27 +07:00
|
|
|
#ifdef CONFIG_DEV_COREDUMP
|
|
|
|
/*
 * devcoredump read callback: render the saved crash state into the
 * caller's buffer window [offset, offset+count) using the coredump
 * printer, prefixed with a small YAML-ish header.
 *
 * Returns the number of bytes produced for this window, or 0 if no
 * crash state is currently held.
 */
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct msm_gpu_state *state;
	struct drm_printer p;
	struct drm_print_iterator iter = {
		.data = buffer,
		.offset = 0,
		.start = offset,
		.remain = count,
	};

	/* Takes a reference; dropped below once printing is done */
	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	/* Target-specific register/ring dump */
	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}
|
|
|
|
|
|
|
|
/*
 * devcoredump free callback: drop the crash-state reference held for the
 * lifetime of the coredump (data is the struct msm_gpu * registered with
 * dev_coredumpm()).
 */
static void msm_gpu_devcoredump_free(void *data)
{
	msm_gpu_crashstate_put(data);
}
|
|
|
|
|
2018-07-24 23:33:31 +07:00
|
|
|
/*
 * Append one buffer object to the crash state.  Size and iova are always
 * recorded; the contents are snapshotted only for non-imported objects the
 * submit marked for read.  A failed allocation or vmap simply leaves
 * state_bo->data NULL — the entry is still counted.
 */
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct msm_gem_object *obj, u64 iova, u32 flags)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
	bool want_data = (flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach;

	state_bo->size = obj->base.size;
	state_bo->iova = iova;

	if (want_data) {
		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
		if (state_bo->data) {
			void *vaddr = msm_gem_get_vaddr_active(&obj->base);

			if (IS_ERR(vaddr)) {
				kvfree(state_bo->data);
				state_bo->data = NULL;
			} else {
				memcpy(state_bo->data, vaddr, obj->base.size);
				msm_gem_put_vaddr(&obj->base);
			}
		}
	}

	state->nr_bos++;
}
|
|
|
|
|
|
|
|
/*
 * Capture a crash state snapshot and register it with devcoredump.
 *
 * @submit: the (possibly NULL) offending submit whose cmd buffers get
 *          snapshotted
 * @comm/@cmd: offending task name/cmdline, may be NULL (kstrdup(NULL)
 *          yields NULL, so that is safe)
 *
 * Ownership of the state moves to gpu->crashstate; it is released via the
 * devcoredump read/free callbacks through msm_gpu_crashstate_put().
 */
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);

	if (submit) {
		int i;

		/* One bo slot per command buffer; only those get dumped here */
		state->bos = kcalloc(submit->nr_cmds,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		/* If the kcalloc failed the loop is skipped and nr_bos stays 0 */
		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
			int idx = submit->cmd[i].idx;

			msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
				submit->bos[idx].iova, submit->bos[idx].flags);
		}
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	/* FIXME: Release the crashstate if this errors out? */
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
|
|
|
|
#else
|
drm/msm/gpu: fix parameters in function msm_gpu_crashstate_capture
When CONFIG_DEV_COREDUMP isn't defined msm_gpu_crashstate_capture
doesn't pass the correct parameters.
drivers/gpu/drm/msm/msm_gpu.c: In function ‘recover_worker’:
drivers/gpu/drm/msm/msm_gpu.c:479:34: error: passing argument 2 of ‘msm_gpu_crashstate_capture’ from incompatible pointer type [-Werror=incompatible-pointer-types]
msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
^~~~~~
drivers/gpu/drm/msm/msm_gpu.c:388:13: note: expected ‘char *’ but argument is of type ‘struct msm_gem_submit *’
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, char *comm,
^~~~~~~~~~~~~~~~~~~~~~~~~~
drivers/gpu/drm/msm/msm_gpu.c:479:2: error: too many arguments to function ‘msm_gpu_crashstate_capture’
msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
^~~~~~~~~~~~~~~~~~~~~~~~~~
drivers/gpu/drm/msm/msm_gpu.c:388:13: note: declared here
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, char *comm,
This patch updates the stub's parameter list to match the real msm_gpu_crashstate_capture implementation.
Fixes: cdb95931dea3 ("drm/msm/gpu: Add the buffer objects from the submit to the crash dump")
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
Reviewed-By: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
2018-08-01 03:45:32 +07:00
|
|
|
/*
 * Stub used when CONFIG_DEV_COREDUMP is disabled: crash-state capture is a
 * no-op, but the signature must match the real implementation so callers
 * (e.g. recover_worker()) compile unchanged.
 */
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
|
|
|
|
#endif
|
|
|
|
|
2013-08-25 01:20:38 +07:00
|
|
|
/*
|
|
|
|
* Hangcheck detection for locked gpu:
|
|
|
|
*/
|
|
|
|
|
2017-10-21 00:06:57 +07:00
|
|
|
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
|
|
|
|
uint32_t fence)
|
|
|
|
{
|
|
|
|
struct msm_gem_submit *submit;
|
|
|
|
|
|
|
|
list_for_each_entry(submit, &ring->submits, node) {
|
|
|
|
if (submit->seqno > fence)
|
|
|
|
break;
|
|
|
|
|
|
|
|
msm_update_fence(submit->ring->fctx,
|
|
|
|
submit->fence->seqno);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-13 21:17:18 +07:00
|
|
|
static struct msm_gem_submit *
|
|
|
|
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
|
|
|
|
{
|
|
|
|
struct msm_gem_submit *submit;
|
|
|
|
|
|
|
|
WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
|
|
|
|
|
|
|
|
list_for_each_entry(submit, &ring->submits, node)
|
|
|
|
if (submit->seqno == fence)
|
|
|
|
return submit;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-03-16 05:26:28 +07:00
|
|
|
static void retire_submits(struct msm_gpu *gpu);
|
2015-06-08 00:46:04 +07:00
|
|
|
|
2013-08-25 01:20:38 +07:00
|
|
|
/*
 * GPU hang recovery worker (gpu->recover_work): identify the faulting
 * submit, record fault statistics and a crash dump, fast-forward the ring
 * fences past the hung submit, then reset the GPU and replay every
 * still-pending submit.  Runs with dev->struct_mutex held throughout.
 */
static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&dev->struct_mutex);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	/* The submit one past the last completed fence is the suspect */
	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

		/* Increment the fault counts */
		gpu->global_faults++;
		submit->queue->faults++;

		/* Identify the task that queued the hung submit, for the log */
		task = get_pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_KERNEL);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			put_task_struct(task);
		}

		if (comm && cmd) {
			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	/* crashstate took its own copies via kstrdup; kfree(NULL) is a no-op */
	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}
|
|
|
|
|
|
|
|
static void hangcheck_timer_reset(struct msm_gpu *gpu)
|
|
|
|
{
|
|
|
|
DBG("%s", gpu->name);
|
|
|
|
mod_timer(&gpu->hangcheck_timer,
|
|
|
|
round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
|
|
|
|
}
|
|
|
|
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-17 04:43:17 +07:00
|
|
|
static void hangcheck_handler(struct timer_list *t)
|
2013-08-25 01:20:38 +07:00
|
|
|
{
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-17 04:43:17 +07:00
|
|
|
struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
|
2013-09-12 04:14:30 +07:00
|
|
|
struct drm_device *dev = gpu->dev;
|
|
|
|
struct msm_drm_private *priv = dev->dev_private;
|
2017-10-21 00:06:57 +07:00
|
|
|
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
|
|
|
|
uint32_t fence = ring->memptrs->fence;
|
2013-08-25 01:20:38 +07:00
|
|
|
|
2017-10-21 00:06:57 +07:00
|
|
|
if (fence != ring->hangcheck_fence) {
|
2013-08-25 01:20:38 +07:00
|
|
|
/* some progress has been made.. ya! */
|
2017-10-21 00:06:57 +07:00
|
|
|
ring->hangcheck_fence = fence;
|
|
|
|
} else if (fence < ring->seqno) {
|
2013-08-25 01:20:38 +07:00
|
|
|
/* no progress and not done.. hung! */
|
2017-10-21 00:06:57 +07:00
|
|
|
ring->hangcheck_fence = fence;
|
2018-10-21 00:49:26 +07:00
|
|
|
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
|
2017-10-21 00:06:57 +07:00
|
|
|
gpu->name, ring->id);
|
2018-10-21 00:49:26 +07:00
|
|
|
DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
|
2013-09-03 18:12:03 +07:00
|
|
|
gpu->name, fence);
|
2018-10-21 00:49:26 +07:00
|
|
|
DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
|
2017-10-21 00:06:57 +07:00
|
|
|
gpu->name, ring->seqno);
|
|
|
|
|
2013-08-25 01:20:38 +07:00
|
|
|
queue_work(priv->wq, &gpu->recover_work);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* if still more pending work, reset the hangcheck timer: */
|
2017-10-21 00:06:57 +07:00
|
|
|
if (ring->seqno > ring->hangcheck_fence)
|
2013-08-25 01:20:38 +07:00
|
|
|
hangcheck_timer_reset(gpu);
|
2013-09-12 04:14:30 +07:00
|
|
|
|
|
|
|
/* workaround for missing irq: */
|
|
|
|
queue_work(priv->wq, &gpu->retire_work);
|
2013-08-25 01:20:38 +07:00
|
|
|
}
|
|
|
|
|
2014-05-31 01:49:43 +07:00
|
|
|
/*
|
|
|
|
* Performance Counters:
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* called under perf_lock */
|
|
|
|
/*
 * Sample the hardware performance counter registers and report the
 * per-counter deltas since the previous call.
 *
 * At most min(ncntrs, gpu->num_perfcntrs) deltas are written to @cntrs;
 * the number written is returned.  Called under perf_lock.
 */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t snapshot[ARRAY_SIZE(gpu->last_cntrs)];
	int idx, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (idx = 0; idx < gpu->num_perfcntrs; idx++)
		snapshot[idx] = gpu_read(gpu, gpu->perfcntrs[idx].sample_reg);

	/* report deltas to the caller: */
	for (idx = 0; idx < n; idx++)
		cntrs[idx] = snapshot[idx] - gpu->last_cntrs[idx];

	/* remember this sample for the next delta: */
	for (idx = 0; idx < gpu->num_perfcntrs; idx++)
		gpu->last_cntrs[idx] = snapshot[idx];

	return n;
}
|
|
|
|
|
|
|
|
static void update_sw_cntrs(struct msm_gpu *gpu)
|
|
|
|
{
|
|
|
|
ktime_t time;
|
|
|
|
uint32_t elapsed;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&gpu->perf_lock, flags);
|
|
|
|
if (!gpu->perfcntr_active)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
time = ktime_get();
|
|
|
|
elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
|
|
|
|
|
|
|
|
gpu->totaltime += elapsed;
|
|
|
|
if (gpu->last_sample.active)
|
|
|
|
gpu->activetime += elapsed;
|
|
|
|
|
|
|
|
gpu->last_sample.active = msm_gpu_active(gpu);
|
|
|
|
gpu->last_sample.time = time;
|
|
|
|
|
|
|
|
out:
|
|
|
|
spin_unlock_irqrestore(&gpu->perf_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
2017-02-11 03:36:33 +07:00
|
|
|
pm_runtime_get_sync(&gpu->pdev->dev);
|
|
|
|
|
2014-05-31 01:49:43 +07:00
|
|
|
spin_lock_irqsave(&gpu->perf_lock, flags);
|
|
|
|
/* we could dynamically enable/disable perfcntr registers too.. */
|
|
|
|
gpu->last_sample.active = msm_gpu_active(gpu);
|
|
|
|
gpu->last_sample.time = ktime_get();
|
|
|
|
gpu->activetime = gpu->totaltime = 0;
|
|
|
|
gpu->perfcntr_active = true;
|
|
|
|
update_hw_cntrs(gpu, 0, NULL);
|
|
|
|
spin_unlock_irqrestore(&gpu->perf_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
|
|
|
|
{
|
|
|
|
gpu->perfcntr_active = false;
|
2017-02-11 03:36:33 +07:00
|
|
|
pm_runtime_put_sync(&gpu->pdev->dev);
|
2014-05-31 01:49:43 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* returns -errno or # of cntrs sampled */
|
|
|
|
/*
 * Read and reset the accumulated software counters, and sample up to
 * @ncntrs hardware counter deltas into @cntrs.
 *
 * Returns the number of hardware counters sampled, or -EINVAL if no
 * profiling session is active.
 */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	/* hand the accumulated totals to the caller and restart them: */
	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;
	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}
|
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
/*
|
|
|
|
* Cmdstream submission/retirement:
|
|
|
|
*/
|
|
|
|
|
2018-11-02 22:25:21 +07:00
|
|
|
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
|
|
|
|
struct msm_gem_submit *submit)
|
2016-03-17 03:07:38 +07:00
|
|
|
{
|
2018-11-02 22:25:21 +07:00
|
|
|
int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
|
|
|
|
volatile struct msm_gpu_submit_stats *stats;
|
|
|
|
u64 elapsed, clock = 0;
|
2016-03-17 03:07:38 +07:00
|
|
|
int i;
|
|
|
|
|
2018-11-02 22:25:21 +07:00
|
|
|
stats = &ring->memptrs->stats[index];
|
|
|
|
/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
|
|
|
|
elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
|
|
|
|
do_div(elapsed, 192);
|
|
|
|
|
|
|
|
/* Calculate the clock frequency from the number of CP cycles */
|
|
|
|
if (elapsed) {
|
|
|
|
clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
|
|
|
|
do_div(clock, elapsed);
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_msm_gpu_submit_retired(submit, elapsed, clock,
|
|
|
|
stats->alwayson_start, stats->alwayson_end);
|
|
|
|
|
2016-03-17 03:07:38 +07:00
|
|
|
for (i = 0; i < submit->nr_bos; i++) {
|
|
|
|
struct msm_gem_object *msm_obj = submit->bos[i].obj;
|
|
|
|
/* move to inactive: */
|
|
|
|
msm_gem_move_to_inactive(&msm_obj->base);
|
2018-11-08 05:35:51 +07:00
|
|
|
msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
|
2018-01-26 10:55:54 +07:00
|
|
|
drm_gem_object_put(&msm_obj->base);
|
2016-03-17 03:07:38 +07:00
|
|
|
}
|
|
|
|
|
2017-02-11 03:36:33 +07:00
|
|
|
pm_runtime_mark_last_busy(&gpu->pdev->dev);
|
|
|
|
pm_runtime_put_autosuspend(&gpu->pdev->dev);
|
2016-05-03 20:50:26 +07:00
|
|
|
msm_gem_submit_free(submit);
|
2016-03-17 03:07:38 +07:00
|
|
|
}
|
|
|
|
|
2016-03-16 05:26:28 +07:00
|
|
|
static void retire_submits(struct msm_gpu *gpu)
|
2015-06-08 00:46:04 +07:00
|
|
|
{
|
|
|
|
struct drm_device *dev = gpu->dev;
|
2017-10-21 00:06:57 +07:00
|
|
|
struct msm_gem_submit *submit, *tmp;
|
|
|
|
int i;
|
2015-06-08 00:46:04 +07:00
|
|
|
|
|
|
|
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
|
|
|
|
2017-10-21 00:06:57 +07:00
|
|
|
/* Retire the commits starting with highest priority */
|
2017-10-21 00:07:01 +07:00
|
|
|
for (i = 0; i < gpu->nr_rings; i++) {
|
2017-10-21 00:06:57 +07:00
|
|
|
struct msm_ringbuffer *ring = gpu->rb[i];
|
2015-06-08 00:46:04 +07:00
|
|
|
|
2017-10-21 00:06:57 +07:00
|
|
|
list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
|
|
|
|
if (dma_fence_is_signaled(submit->fence))
|
2018-11-02 22:25:21 +07:00
|
|
|
retire_submit(gpu, ring, submit);
|
2015-06-08 00:46:04 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
static void retire_worker(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
|
|
|
|
struct drm_device *dev = gpu->dev;
|
2017-10-21 00:06:57 +07:00
|
|
|
int i;
|
2013-07-19 23:59:32 +07:00
|
|
|
|
2017-10-21 00:06:57 +07:00
|
|
|
for (i = 0; i < gpu->nr_rings; i++)
|
|
|
|
update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
|
2013-09-15 01:01:55 +07:00
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
mutex_lock(&dev->struct_mutex);
|
2016-03-16 05:26:28 +07:00
|
|
|
retire_submits(gpu);
|
2013-07-19 23:59:32 +07:00
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* call from irq handler to schedule work to retire bo's */
|
|
|
|
void msm_gpu_retire(struct msm_gpu *gpu)
|
|
|
|
{
|
|
|
|
struct msm_drm_private *priv = gpu->dev->dev_private;
|
|
|
|
queue_work(priv->wq, &gpu->retire_work);
|
2014-05-31 01:49:43 +07:00
|
|
|
update_sw_cntrs(gpu);
|
2013-07-19 23:59:32 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* add bo's to gpu's ring, and kick gpu: */
|
2016-06-17 03:37:38 +07:00
|
|
|
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
|
2013-07-19 23:59:32 +07:00
|
|
|
struct msm_file_private *ctx)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = gpu->dev;
|
|
|
|
struct msm_drm_private *priv = dev->dev_private;
|
2017-10-21 00:06:57 +07:00
|
|
|
struct msm_ringbuffer *ring = submit->ring;
|
2016-06-17 03:37:38 +07:00
|
|
|
int i;
|
2013-07-19 23:59:32 +07:00
|
|
|
|
2015-06-08 00:46:04 +07:00
|
|
|
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
|
|
|
|
2017-02-11 03:36:33 +07:00
|
|
|
pm_runtime_get_sync(&gpu->pdev->dev);
|
|
|
|
|
|
|
|
msm_gpu_hw_init(gpu);
|
2014-01-12 04:25:08 +07:00
|
|
|
|
2017-10-21 00:06:57 +07:00
|
|
|
submit->seqno = ++ring->seqno;
|
|
|
|
|
|
|
|
list_add_tail(&submit->node, &ring->submits);
|
2015-06-08 00:46:04 +07:00
|
|
|
|
2017-09-15 21:46:45 +07:00
|
|
|
msm_rd_dump_submit(priv->rd, submit, NULL);
|
2014-05-31 01:47:38 +07:00
|
|
|
|
2014-05-31 01:49:43 +07:00
|
|
|
update_sw_cntrs(gpu);
|
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
for (i = 0; i < submit->nr_bos; i++) {
|
|
|
|
struct msm_gem_object *msm_obj = submit->bos[i].obj;
|
2016-11-12 00:06:46 +07:00
|
|
|
uint64_t iova;
|
2013-07-19 23:59:32 +07:00
|
|
|
|
|
|
|
/* can't happen yet.. but when we add 2d support we'll have
|
|
|
|
* to deal w/ cross-ring synchronization:
|
|
|
|
*/
|
|
|
|
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
|
|
|
|
|
2016-03-17 03:07:38 +07:00
|
|
|
/* submit takes a reference to the bo and iova until retired: */
|
2018-01-26 10:55:54 +07:00
|
|
|
drm_gem_object_get(&msm_obj->base);
|
2018-11-08 05:35:50 +07:00
|
|
|
msm_gem_get_and_pin_iova(&msm_obj->base,
|
2017-06-13 22:07:08 +07:00
|
|
|
submit->gpu->aspace, &iova);
|
2013-07-19 23:59:32 +07:00
|
|
|
|
2013-09-02 00:25:09 +07:00
|
|
|
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
|
|
|
|
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
|
2016-03-16 05:26:28 +07:00
|
|
|
else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
|
|
|
|
msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
|
2013-07-19 23:59:32 +07:00
|
|
|
}
|
2015-06-08 00:46:04 +07:00
|
|
|
|
2016-05-03 20:46:49 +07:00
|
|
|
gpu->funcs->submit(gpu, submit, ctx);
|
2015-06-08 00:46:04 +07:00
|
|
|
priv->lastctx = ctx;
|
|
|
|
|
2013-08-25 01:20:38 +07:00
|
|
|
hangcheck_timer_reset(gpu);
|
2013-07-19 23:59:32 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Init/Cleanup:
|
|
|
|
*/
|
|
|
|
|
|
|
|
static irqreturn_t irq_handler(int irq, void *data)
|
|
|
|
{
|
|
|
|
struct msm_gpu *gpu = data;
|
|
|
|
return gpu->funcs->irq(gpu);
|
|
|
|
}
|
|
|
|
|
2017-03-08 00:02:56 +07:00
|
|
|
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
|
|
|
|
{
|
2018-08-07 00:33:21 +07:00
|
|
|
int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);
|
2017-03-08 00:02:56 +07:00
|
|
|
|
2018-08-07 00:33:21 +07:00
|
|
|
if (ret < 1) {
|
2017-03-08 00:02:56 +07:00
|
|
|
gpu->nr_clocks = 0;
|
2018-08-07 00:33:21 +07:00
|
|
|
return ret;
|
2018-01-23 01:10:45 +07:00
|
|
|
}
|
2017-03-08 00:02:56 +07:00
|
|
|
|
2018-08-07 00:33:21 +07:00
|
|
|
gpu->nr_clocks = ret;
|
2017-03-08 00:02:56 +07:00
|
|
|
|
2018-08-07 00:33:21 +07:00
|
|
|
gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
|
|
|
|
gpu->nr_clocks, "core");
|
2017-03-08 00:02:56 +07:00
|
|
|
|
2018-08-07 00:33:21 +07:00
|
|
|
gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
|
|
|
|
gpu->nr_clocks, "rbbmtimer");
|
2017-03-08 00:02:56 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2013-07-19 23:59:32 +07:00
|
|
|
|
2017-07-27 23:42:39 +07:00
|
|
|
static struct msm_gem_address_space *
|
|
|
|
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
|
|
|
|
uint64_t va_start, uint64_t va_end)
|
|
|
|
{
|
|
|
|
struct msm_gem_address_space *aspace;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup IOMMU.. eventually we will (I think) do this once per context
|
|
|
|
* and have separate page tables per context. For now, to keep things
|
|
|
|
* simple and to get something working, just use a single address space:
|
|
|
|
*/
|
2018-11-15 05:08:04 +07:00
|
|
|
if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
|
|
|
|
struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
|
|
|
|
if (!iommu)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
iommu->geometry.aperture_start = va_start;
|
|
|
|
iommu->geometry.aperture_end = va_end;
|
|
|
|
|
|
|
|
DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
|
|
|
|
|
|
|
|
aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
|
|
|
|
if (IS_ERR(aspace))
|
|
|
|
iommu_domain_free(iommu);
|
|
|
|
} else {
|
|
|
|
aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
|
|
|
|
va_start, va_end);
|
|
|
|
}
|
2017-07-27 23:42:39 +07:00
|
|
|
|
|
|
|
if (IS_ERR(aspace)) {
|
2018-11-15 05:08:04 +07:00
|
|
|
DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
|
2017-07-27 23:42:39 +07:00
|
|
|
PTR_ERR(aspace));
|
|
|
|
return ERR_CAST(aspace);
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
|
|
|
|
if (ret) {
|
|
|
|
msm_gem_address_space_put(aspace);
|
|
|
|
return ERR_PTR(ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
return aspace;
|
|
|
|
}
|
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
|
|
|
|
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
|
2017-05-09 03:35:03 +07:00
|
|
|
const char *name, struct msm_gpu_config *config)
|
2013-07-19 23:59:32 +07:00
|
|
|
{
|
2017-10-21 00:06:57 +07:00
|
|
|
int i, ret, nr_rings = config->nr_rings;
|
|
|
|
void *memptrs;
|
|
|
|
uint64_t memptrs_iova;
|
2013-07-19 23:59:32 +07:00
|
|
|
|
2014-05-31 01:49:43 +07:00
|
|
|
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
|
|
|
|
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
|
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
gpu->dev = drm;
|
|
|
|
gpu->funcs = funcs;
|
|
|
|
gpu->name = name;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&gpu->active_list);
|
|
|
|
INIT_WORK(&gpu->retire_work, retire_worker);
|
2013-08-25 01:20:38 +07:00
|
|
|
INIT_WORK(&gpu->recover_work, recover_worker);
|
|
|
|
|
2015-06-08 00:46:04 +07:00
|
|
|
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-17 04:43:17 +07:00
|
|
|
timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
|
2013-07-19 23:59:32 +07:00
|
|
|
|
2014-05-31 01:49:43 +07:00
|
|
|
spin_lock_init(&gpu->perf_lock);
|
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
|
|
|
|
/* Map registers: */
|
2017-05-09 03:35:03 +07:00
|
|
|
gpu->mmio = msm_ioremap(pdev, config->ioname, name);
|
2013-07-19 23:59:32 +07:00
|
|
|
if (IS_ERR(gpu->mmio)) {
|
|
|
|
ret = PTR_ERR(gpu->mmio);
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get Interrupt: */
|
2018-12-19 01:32:36 +07:00
|
|
|
gpu->irq = platform_get_irq(pdev, 0);
|
2013-07-19 23:59:32 +07:00
|
|
|
if (gpu->irq < 0) {
|
|
|
|
ret = gpu->irq;
|
2018-10-21 00:49:26 +07:00
|
|
|
DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
|
2013-07-19 23:59:32 +07:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
|
|
|
|
IRQF_TRIGGER_HIGH, gpu->name, gpu);
|
|
|
|
if (ret) {
|
2018-10-21 00:49:26 +07:00
|
|
|
DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
|
2013-07-19 23:59:32 +07:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2017-03-08 00:02:56 +07:00
|
|
|
ret = get_clocks(pdev, gpu);
|
|
|
|
if (ret)
|
|
|
|
goto fail;
|
2013-07-19 23:59:32 +07:00
|
|
|
|
2017-01-30 23:30:58 +07:00
|
|
|
gpu->ebi1_clk = msm_clk_get(pdev, "bus");
|
2013-07-19 23:59:32 +07:00
|
|
|
DBG("ebi1_clk: %p", gpu->ebi1_clk);
|
|
|
|
if (IS_ERR(gpu->ebi1_clk))
|
|
|
|
gpu->ebi1_clk = NULL;
|
|
|
|
|
|
|
|
/* Acquire regulators: */
|
|
|
|
gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
|
|
|
|
DBG("gpu_reg: %p", gpu->gpu_reg);
|
|
|
|
if (IS_ERR(gpu->gpu_reg))
|
|
|
|
gpu->gpu_reg = NULL;
|
|
|
|
|
|
|
|
gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
|
|
|
|
DBG("gpu_cx: %p", gpu->gpu_cx);
|
|
|
|
if (IS_ERR(gpu->gpu_cx))
|
|
|
|
gpu->gpu_cx = NULL;
|
|
|
|
|
2017-07-27 23:42:39 +07:00
|
|
|
gpu->pdev = pdev;
|
|
|
|
platform_set_drvdata(pdev, gpu);
|
|
|
|
|
2018-01-11 00:41:54 +07:00
|
|
|
msm_devfreq_init(gpu);
|
|
|
|
|
2017-07-27 23:42:39 +07:00
|
|
|
gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
|
|
|
|
config->va_start, config->va_end);
|
|
|
|
|
|
|
|
if (gpu->aspace == NULL)
|
2018-10-21 00:49:26 +07:00
|
|
|
DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
|
2017-07-27 23:42:39 +07:00
|
|
|
else if (IS_ERR(gpu->aspace)) {
|
|
|
|
ret = PTR_ERR(gpu->aspace);
|
|
|
|
goto fail;
|
2013-07-19 23:59:32 +07:00
|
|
|
}
|
2014-07-11 22:59:22 +07:00
|
|
|
|
2018-11-02 22:25:18 +07:00
|
|
|
memptrs = msm_gem_kernel_new(drm,
|
|
|
|
sizeof(struct msm_rbmemptrs) * nr_rings,
|
2017-10-21 00:06:56 +07:00
|
|
|
MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
|
2017-10-21 00:06:57 +07:00
|
|
|
&memptrs_iova);
|
2017-10-21 00:06:56 +07:00
|
|
|
|
2017-10-21 00:06:57 +07:00
|
|
|
if (IS_ERR(memptrs)) {
|
|
|
|
ret = PTR_ERR(memptrs);
|
2018-10-21 00:49:26 +07:00
|
|
|
DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
|
2017-10-21 00:06:56 +07:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2018-11-08 05:35:52 +07:00
|
|
|
msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
|
|
|
|
|
2017-10-21 00:06:57 +07:00
|
|
|
if (nr_rings > ARRAY_SIZE(gpu->rb)) {
|
2017-08-03 18:50:48 +07:00
|
|
|
DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
|
2017-10-21 00:06:57 +07:00
|
|
|
ARRAY_SIZE(gpu->rb));
|
|
|
|
nr_rings = ARRAY_SIZE(gpu->rb);
|
2013-07-19 23:59:32 +07:00
|
|
|
}
|
|
|
|
|
2017-10-21 00:06:57 +07:00
|
|
|
/* Create ringbuffer(s): */
|
|
|
|
for (i = 0; i < nr_rings; i++) {
|
|
|
|
gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
|
|
|
|
|
|
|
|
if (IS_ERR(gpu->rb[i])) {
|
|
|
|
ret = PTR_ERR(gpu->rb[i]);
|
2018-10-21 00:49:26 +07:00
|
|
|
DRM_DEV_ERROR(drm->dev,
|
2017-10-21 00:06:57 +07:00
|
|
|
"could not create ringbuffer %d: %d\n", i, ret);
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
memptrs += sizeof(struct msm_rbmemptrs);
|
|
|
|
memptrs_iova += sizeof(struct msm_rbmemptrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
gpu->nr_rings = nr_rings;
|
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
fail:
|
2017-10-21 00:06:57 +07:00
|
|
|
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
|
|
|
|
msm_ringbuffer_destroy(gpu->rb[i]);
|
|
|
|
gpu->rb[i] = NULL;
|
|
|
|
}
|
|
|
|
|
2018-11-08 05:35:46 +07:00
|
|
|
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
|
2017-10-21 00:06:56 +07:00
|
|
|
|
2017-07-27 23:42:39 +07:00
|
|
|
platform_set_drvdata(pdev, NULL);
|
2013-07-19 23:59:32 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
void msm_gpu_cleanup(struct msm_gpu *gpu)
|
|
|
|
{
|
2017-10-21 00:06:57 +07:00
|
|
|
int i;
|
|
|
|
|
2013-07-19 23:59:32 +07:00
|
|
|
DBG("%s", gpu->name);
|
|
|
|
|
|
|
|
WARN_ON(!list_empty(&gpu->active_list));
|
|
|
|
|
2017-10-21 00:06:57 +07:00
|
|
|
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
|
|
|
|
msm_ringbuffer_destroy(gpu->rb[i]);
|
|
|
|
gpu->rb[i] = NULL;
|
2013-07-19 23:59:32 +07:00
|
|
|
}
|
2017-10-21 00:06:56 +07:00
|
|
|
|
2018-11-08 05:35:46 +07:00
|
|
|
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
|
2017-10-21 00:06:56 +07:00
|
|
|
|
|
|
|
if (!IS_ERR_OR_NULL(gpu->aspace)) {
|
2017-07-27 23:42:39 +07:00
|
|
|
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
|
|
|
|
NULL, 0);
|
|
|
|
msm_gem_address_space_put(gpu->aspace);
|
|
|
|
}
|
2013-07-19 23:59:32 +07:00
|
|
|
}
|