// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/devcoredump.h>
#include <linux/moduleparam.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"
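
/*
 * Produce exactly one core dump per hang by default. Writing 1 to
 * /sys/module/etnaviv/parameters/dump_core (assuming the default module
 * name) re-arms dumping for the next fault.
 */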
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
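
/*
 * Write cursor for the dump file under construction: 'hdr' walks the array
 * of object headers at the start of the file, 'data' walks the payload area
 * behind the headers, and 'start' anchors both for file offset calculations.
 */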
struct core_dump_iterator {
	void *start;
	struct etnaviv_dump_object_header *hdr;
	void *data;
};
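
/* MMIO registers sampled into the ETDUMP_BUF_REG section of the dump */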
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};
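
/*
 * Finish the object the iterator currently points at: fill in its header
 * (all fields little-endian) and advance both cursors past it.
 */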
static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
	u32 type, void *data_end)
{
	struct etnaviv_dump_object_header *hdr = iter->hdr;

	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
	hdr->type = cpu_to_le32(type);
	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
	hdr->file_size = cpu_to_le32(data_end - iter->data);

	iter->hdr++;
	iter->data += le32_to_cpu(hdr->file_size);
}
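
/* Snapshot the register list above into an ETDUMP_BUF_REG object. */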
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu)
{
	struct etnaviv_dump_registers *reg = iter->data;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
		reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
		reg->value = cpu_to_le32(gpu_read(gpu, etnaviv_dump_registers[i]));
	}

	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
}
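
/*
 * Dump the page tables of the given MMU context into an ETDUMP_BUF_MMU
 * object.
 */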
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
	struct etnaviv_iommu_context *mmu, size_t mmu_size)
{
	etnaviv_iommu_dump(mmu, iter->data);

	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}
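
/*
 * Copy an arbitrary kernel buffer into the dump and record the GPU virtual
 * address it was mapped at.
 */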
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
	void *ptr, size_t size, u64 iova)
{
	memcpy(iter->data, ptr, size);

	iter->hdr->iova = cpu_to_le64(iova);

	etnaviv_core_dump_header(iter, type, iter->data + size);
}
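
/*
 * Entry point, called with the hanging submit when the scheduler detects a
 * job timeout: size the dump file, snapshot the GPU state under the MMU
 * context lock and hand the result to devcoredump. Userspace can then read
 * the file back from /sys/class/devcoredump/devcd<N>/data.
 */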
void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct core_dump_iterator iter;
	struct etnaviv_gem_object *obj;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;
	int i;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	mutex_lock(&gpu->mmu_context->lock);

	mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);

	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
	n_obj = 5;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
			mmu_size + gpu->buffer.size + submit->cmdbuf.size;

	/* Add in the active buffer objects */
	for (i = 0; i < submit->nr_bos; i++) {
		obj = submit->bos[i].obj;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;
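
	/*
	 * Resulting file layout: n_obj headers up front, followed by the
	 * payloads (registers, MMU, ring, cmdbuf, optional bomap and BOs)
	 * in the order they are dumped below.
	 */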

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
			       PAGE_KERNEL);
	if (!iter.start) {
		mutex_unlock(&gpu->mmu_context->lock);
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer,
					&gpu->mmu_context->cmdbuf_mapping));

	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
					&gpu->mmu_context->cmdbuf_mapping));

	mutex_unlock(&gpu->mmu_context->lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}
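
	/*
	 * Dump each BO: data[0] of the header holds the offset of the BO's
	 * page list inside the bomap, the payload holds the BO contents if
	 * they can be mapped into the kernel.
	 */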
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_vram_mapping *vram;
		struct page **pages;
		void *vaddr;

		obj = submit->bos[i].obj;
		vram = submit->bos[i].mapping;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
		if (!IS_ERR(pages)) {
			int j;

			iter.hdr->data[0] = cpu_to_le32(bomap - bomap_start);

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}