2018-05-08 21:20:54 +07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2015-12-04 00:21:29 +07:00
|
|
|
/*
|
2018-05-08 21:20:54 +07:00
|
|
|
* Copyright (C) 2014-2018 Etnaviv Project
|
2015-12-04 00:21:29 +07:00
|
|
|
*/
|
|
|
|
|
2019-06-30 12:21:03 +07:00
|
|
|
#include <drm/drm_drv.h>
|
|
|
|
|
2017-01-16 22:09:51 +07:00
|
|
|
#include "etnaviv_cmdbuf.h"
|
2015-12-04 00:21:29 +07:00
|
|
|
#include "etnaviv_gpu.h"
|
|
|
|
#include "etnaviv_gem.h"
|
|
|
|
#include "etnaviv_mmu.h"
|
|
|
|
|
|
|
|
#include "common.xml.h"
|
|
|
|
#include "state.xml.h"
|
2016-08-20 04:53:59 +07:00
|
|
|
#include "state_hi.xml.h"
|
2016-01-21 22:20:25 +07:00
|
|
|
#include "state_3d.xml.h"
|
2015-12-04 00:21:29 +07:00
|
|
|
#include "cmdstream.xml.h"
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Command Buffer helper:
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
/* Emit one 32-bit word at the current write position and advance it. */
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *words = (u32 *)buffer->vaddr;
	unsigned int idx = buffer->user_size / 4;

	/* Writing past the end of the ring buffer is a driver bug. */
	BUG_ON(buffer->user_size >= buffer->size);

	words[idx] = data;
	buffer->user_size += sizeof(u32);
}
|
|
|
|
|
|
|
|
/* Write a single register via the command stream (LOAD_STATE, count 1). */
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 header;

	/* FE commands must start on a 64-bit boundary. */
	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* Build the single-register LOAD_STATE header for 'reg'. */
	header = VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		 VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		 VIV_FE_LOAD_STATE_HEADER_OFFSET(
			reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR);

	OUT(buffer, header);
	OUT(buffer, value);
}
|
|
|
|
|
|
|
|
static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
|
|
|
|
{
|
|
|
|
buffer->user_size = ALIGN(buffer->user_size, 8);
|
|
|
|
|
|
|
|
OUT(buffer, VIV_FE_END_HEADER_OP_END);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
|
|
|
|
{
|
|
|
|
buffer->user_size = ALIGN(buffer->user_size, 8);
|
|
|
|
|
|
|
|
OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
|
|
|
|
u16 prefetch, u32 address)
|
|
|
|
{
|
|
|
|
buffer->user_size = ALIGN(buffer->user_size, 8);
|
|
|
|
|
|
|
|
OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
|
|
|
|
VIV_FE_LINK_HEADER_PREFETCH(prefetch));
|
|
|
|
OUT(buffer, address);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Emit a STALL command that blocks 'from' until 'to' signals. */
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
	u32 from, u32 to)
{
	u32 token = VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to);

	/* FE commands must start on a 64-bit boundary. */
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, token);
}
|
|
|
|
|
2016-01-21 22:20:14 +07:00
|
|
|
/* Emit a semaphore token from 'from' to 'to' via the GL state register. */
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	u32 token = VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		    VIVS_GL_SEMAPHORE_TOKEN_TO(to);

	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, token);
}
|
|
|
|
|
2016-01-21 22:20:30 +07:00
|
|
|
/*
 * Emit the command sequence that switches the FE to a different pipe:
 * flush the caches of the pipe being left, stall until the flush has
 * landed, then select the new pipe.
 */
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush;

	lockdep_assert_held(&gpu->lock);

	/*
	 * This assumes that if we're switching to 2D, we're switching
	 * away from 3D, and vice versa.  Hence, if we're switching to
	 * the 2D core, we need to flush the 3D depth and color caches,
	 * otherwise we need to flush the 2D pixel engine cache.
	 */
	switch (gpu->exec_state) {
	case ETNA_PIPE_2D:
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
		break;
	case ETNA_PIPE_3D:
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
		break;
	default:
		flush = 0;
		break;
	}

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
|
|
|
|
|
|
|
|
/* Dump 'len' dwords of the command buffer starting at byte offset 'off'. */
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 *ptr = buf->vaddr + off;
	u32 bytes = len * 4;
	u32 free = buf->size - bytes - off;

	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
		 ptr,
		 etnaviv_cmdbuf_get_va(buf,
				       &gpu->mmu_context->cmdbuf_mapping) + off,
		 free);

	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
		       ptr, bytes, 0);
}
|
|
|
|
|
2016-01-21 22:20:09 +07:00
|
|
|
/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else. 'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;

	/*
	 * Write the argument word first: while the header word still holds
	 * the WAIT opcode, the GPU keeps spinning on it and does not act on
	 * the (possibly half-written) argument.
	 */
	lw[1] = arg;
	/* Order the argument store before the header store. */
	mb();
	/* Swap the WAIT header for the new command in a single u32 store. */
	lw[0] = cmd;
	/* Make the complete command pair visible before returning. */
	mb();
}
|
|
|
|
|
2016-01-21 22:20:04 +07:00
|
|
|
/*
|
|
|
|
* Ensure that there is space in the command buffer to contiguously write
|
|
|
|
* 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
|
|
|
|
*/
|
|
|
|
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
|
|
|
|
struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
|
|
|
|
{
|
|
|
|
if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
|
|
|
|
buffer->user_size = 0;
|
|
|
|
|
2019-07-06 00:17:27 +07:00
|
|
|
return etnaviv_cmdbuf_get_va(buffer,
|
|
|
|
&gpu->mmu_context->cmdbuf_mapping) +
|
2019-07-06 00:17:21 +07:00
|
|
|
buffer->user_size;
|
2016-01-21 22:20:04 +07:00
|
|
|
}
|
|
|
|
|
2015-12-04 00:21:29 +07:00
|
|
|
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
|
|
|
|
{
|
2017-11-24 22:56:37 +07:00
|
|
|
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
|
2015-12-04 00:21:29 +07:00
|
|
|
|
2017-11-17 23:51:19 +07:00
|
|
|
lockdep_assert_held(&gpu->lock);
|
|
|
|
|
2015-12-04 00:21:29 +07:00
|
|
|
/* initialize buffer */
|
|
|
|
buffer->user_size = 0;
|
|
|
|
|
|
|
|
CMD_WAIT(buffer);
|
2019-07-06 00:17:27 +07:00
|
|
|
CMD_LINK(buffer, 2,
|
|
|
|
etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
|
2019-07-06 00:17:21 +07:00
|
|
|
+ buffer->user_size - 4);
|
2015-12-04 00:21:29 +07:00
|
|
|
|
|
|
|
return buffer->user_size / 8;
|
|
|
|
}
|
|
|
|
|
2016-08-20 04:53:59 +07:00
|
|
|
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
|
|
|
|
{
|
2017-11-24 22:56:37 +07:00
|
|
|
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
|
2016-08-20 04:53:59 +07:00
|
|
|
|
2017-11-17 23:51:19 +07:00
|
|
|
lockdep_assert_held(&gpu->lock);
|
|
|
|
|
2016-08-20 04:53:59 +07:00
|
|
|
buffer->user_size = 0;
|
|
|
|
|
|
|
|
if (gpu->identity.features & chipFeatures_PIPE_3D) {
|
|
|
|
CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
|
|
|
|
VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
|
|
|
|
CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
|
|
|
|
mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
|
|
|
|
CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
|
|
|
|
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
|
|
|
|
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (gpu->identity.features & chipFeatures_PIPE_2D) {
|
|
|
|
CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
|
|
|
|
VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
|
|
|
|
CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
|
|
|
|
mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
|
|
|
|
CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
|
|
|
|
CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
|
|
|
|
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
|
|
|
|
}
|
|
|
|
|
|
|
|
CMD_END(buffer);
|
|
|
|
|
|
|
|
buffer->user_size = ALIGN(buffer->user_size, 8);
|
|
|
|
|
|
|
|
return buffer->user_size / 8;
|
|
|
|
}
|
|
|
|
|
drm/etnaviv: rework MMU handling
This reworks the MMU handling to make it possible to have multiple MMU contexts.
A context is basically one instance of GPU page tables. Currently we have one
set of page tables per GPU, which isn't all that clever, as it has the
following two consequences:
1. All GPU clients (aka processes) are sharing the same pagetables, which means
there is no isolation between clients, but only between GPU assigned memory
spaces and the rest of the system. Better than nothing, but also not great.
2. Clients operating on the same set of buffers with different etnaviv GPU
cores, e.g. a workload using both the 2D and 3D GPU, need to map the used
buffers into the pagetable sets of each used GPU.
This patch reworks all the MMU handling to introduce the abstraction of the
MMU context. A context can be shared across different GPU cores, as long as
they have compatible MMU implementations, which is the case for all systems
with Vivante GPUs seen in the wild.
As MMUv1 is not able to change pagetables on the fly, without a
"stop the world" operation, which stops GPU, changes pagetables via CPU
interaction, restarts GPU, the implementation introduces a shared context on
MMUv1, which is returned whenever there is a request for a new context.
This patch assigns a MMU context to each GPU, so on MMUv2 systems there is
still one set of pagetables per GPU, but due to the shared context MMUv1
systems see a change in behavior as now a single pagetable set is used
across all GPU cores.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
2019-07-06 00:17:24 +07:00
|
|
|
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
|
2018-01-22 18:28:10 +07:00
|
|
|
{
|
|
|
|
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
|
|
|
|
|
|
|
|
lockdep_assert_held(&gpu->lock);
|
|
|
|
|
|
|
|
buffer->user_size = 0;
|
|
|
|
|
|
|
|
CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
|
drm/etnaviv: rework MMU handling
This reworks the MMU handling to make it possible to have multiple MMU contexts.
A context is basically one instance of GPU page tables. Currently we have one
set of page tables per GPU, which isn't all that clever, as it has the
following two consequences:
1. All GPU clients (aka processes) are sharing the same pagetables, which means
there is no isolation between clients, but only between GPU assigned memory
spaces and the rest of the system. Better than nothing, but also not great.
2. Clients operating on the same set of buffers with different etnaviv GPU
cores, e.g. a workload using both the 2D and 3D GPU, need to map the used
buffers into the pagetable sets of each used GPU.
This patch reworks all the MMU handling to introduce the abstraction of the
MMU context. A context can be shared across different GPU cores, as long as
they have compatible MMU implementations, which is the case for all systems
with Vivante GPUs seen in the wild.
As MMUv1 is not able to change pagetables on the fly, without a
"stop the world" operation, which stops GPU, changes pagetables via CPU
interaction, restarts GPU, the implementation introduces a shared context on
MMUv1, which is returned whenever there is a request for a new context.
This patch assigns a MMU context to each GPU, so on MMUv2 systems there is
still one set of pagetables per GPU, but due to the shared context MMUv1
systems see a change in behavior as now a single pagetable set is used
across all GPU cores.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
2019-07-06 00:17:24 +07:00
|
|
|
VIVS_MMUv2_PTA_CONFIG_INDEX(id));
|
2018-01-22 18:28:10 +07:00
|
|
|
|
|
|
|
CMD_END(buffer);
|
|
|
|
|
|
|
|
buffer->user_size = ALIGN(buffer->user_size, 8);
|
|
|
|
|
|
|
|
return buffer->user_size / 8;
|
|
|
|
}
|
|
|
|
|
2015-12-04 00:21:29 +07:00
|
|
|
/*
 * Stop the front end: either by directly turning the ring's trailing
 * WAIT into an END, or — if the active pipe has caches to flush — by
 * branching to an appended flush sequence that ends in an END.
 */
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	/* The ring always ends in a WAIT + LINK pair (2 * 8 bytes). */
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;

	lockdep_assert_held(&gpu->lock);

	/* Pick the cache flush bits matching the currently active pipe. */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
			VIVS_GL_FLUSH_CACHE_COLOR |
			VIVS_GL_FLUSH_CACHE_TEXTURE |
			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
			VIVS_GL_FLUSH_CACHE_SHADER_L2;

	if (flush) {
		/*
		 * 7 64-bit words: sem + stall + cache flush + TS flush +
		 * sem + stall + end.
		 */
		unsigned int dwords = 7;

		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D)
			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
				       VIVS_TS_FLUSH_CACHE_FLUSH);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_END(buffer);

		/* Branch from the old WAIT into the flush sequence above. */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* Replace the last link-wait with an "END" command */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}
|
|
|
|
|
2017-09-24 20:15:28 +07:00
|
|
|
/*
 * Append a 'sync point' to the ring buffer: signal an event, pause the
 * FE with an END, and leave a fresh WAIT/LINK pair so the ring can be
 * resumed afterwards.
 */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	/* The ring always ends in a WAIT + LINK pair (2 * 8 bytes). */
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 dwords, target;

	lockdep_assert_held(&gpu->lock);

	/*
	 * We need at most 4 dwords in the return target:
	 * 1 event + 1 end + 1 wait + 1 link.
	 */
	dwords = 4;
	target = etnaviv_buffer_reserve(gpu, buffer, dwords);

	/* Signal sync point event */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* Stop the FE to 'pause' the GPU */
	CMD_END(buffer);

	/* Append waitlink */
	CMD_WAIT(buffer);
	/* The LINK points back at the WAIT emitted 4 bytes earlier. */
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	/*
	 * Kick off the 'sync point' command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(dwords),
				    target);
}
|
|
|
|
|
2016-01-21 22:20:40 +07:00
|
|
|
/*
 * Append a command buffer to the ring buffer.
 *
 * Builds any required maintenance commands (MMU flush, PTA/MMU context
 * switch, pipe switch) in the kernel ring, links to the user command
 * buffer, appends a return sequence (cache flush, event, fresh
 * WAIT/LINK), and finally kicks everything off by patching the ring's
 * previous WAIT into a LINK.
 */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	struct etnaviv_iommu_context *mmu_context, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	/* The ring always ends in a WAIT + LINK pair (2 * 8 bytes). */
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 return_target, return_dwords;
	u32 link_target, link_dwords;
	bool switch_context = gpu->exec_state != exec_state;
	bool switch_mmu_context = gpu->mmu_context != mmu_context;
	unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;

	lockdep_assert_held(&gpu->lock);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	/* By default the FE branches straight into the user buffer. */
	link_target = etnaviv_cmdbuf_get_va(cmdbuf,
					    &gpu->mmu_context->cmdbuf_mapping);
	link_dwords = cmdbuf->size / 8;

	/*
	 * If we need maintenance prior to submitting this buffer, we will
	 * need to append a mmu flush load state, followed by a new
	 * link to this buffer - a total of four additional words.
	 */
	if (need_flush || switch_context) {
		u32 target, extra_dwords;

		/* link command */
		extra_dwords = 1;

		/* flush command */
		if (need_flush) {
			/* MMUv1 needs one dword; MMUv2 needs config + sem + stall. */
			if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
				extra_dwords += 1;
			else
				extra_dwords += 3;
		}

		/* pipe switch commands */
		if (switch_context)
			extra_dwords += 4;

		/* PTA load command */
		if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
			extra_dwords += 1;

		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
		/*
		 * Switch MMU context if necessary. Must be done after the
		 * link target has been calculated, as the jump forward in the
		 * kernel ring still uses the last active MMU context before
		 * the switch.
		 */
		if (switch_mmu_context) {
			struct etnaviv_iommu_context *old_context = gpu->mmu_context;

			/* Take a reference on the new context, drop the old one. */
			etnaviv_iommu_context_get(mmu_context);
			gpu->mmu_context = mmu_context;
			etnaviv_iommu_context_put(old_context);
		}

		if (need_flush) {
			/* Add the MMU flush */
			if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
					       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
					       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
			} else {
				u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
					    VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;

				/* In kernel-secure mode, switch the PTA entry. */
				if (switch_mmu_context &&
				    gpu->sec_mode == ETNA_SEC_KERNEL) {
					unsigned short id =
						etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
					CMD_LOAD_STATE(buffer,
						VIVS_MMUv2_PTA_CONFIG,
						VIVS_MMUv2_PTA_CONFIG_INDEX(id));
				}

				/* Non-secure mode programs the MTLB address directly. */
				if (gpu->sec_mode == ETNA_SEC_NONE)
					flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);

				CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
					       flush);
				CMD_SEM(buffer, SYNC_RECIPIENT_FE,
					SYNC_RECIPIENT_PE);
				CMD_STALL(buffer, SYNC_RECIPIENT_FE,
					  SYNC_RECIPIENT_PE);
			}

			gpu->flush_seq = new_flush_seq;
		}

		if (switch_context) {
			etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
			gpu->exec_state = exec_state;
		}

		/* And the link to the submitted buffer */
		link_target = etnaviv_cmdbuf_get_va(cmdbuf,
						    &gpu->mmu_context->cmdbuf_mapping);
		CMD_LINK(buffer, link_dwords, link_target);

		/* Update the link target to point to above instructions */
		link_target = target;
		link_dwords = extra_dwords;
	}

	/*
	 * Append a LINK to the submitted command buffer to return to
	 * the ring buffer. return_target is the ring target address.
	 * We need at most 7 dwords in the return target: 2 cache flush +
	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
	 */
	return_dwords = 7;
	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/*
	 * Append a cache flush, stall, event, wait and link pointing back to
	 * the wait command to the ring buffer.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
				       VIVS_GL_FLUSH_CACHE_PE2D);
	} else {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
				       VIVS_GL_FLUSH_CACHE_DEPTH |
				       VIVS_GL_FLUSH_CACHE_COLOR);
		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
				       VIVS_TS_FLUSH_CACHE_FLUSH);
	}
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);
	CMD_WAIT(buffer);
	/* The LINK points back at the WAIT emitted 4 bytes earlier. */
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			return_target,
			etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
			cmdbuf->vaddr);

	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", return_target);
		pr_info("event: %d\n", event);
	}

	/*
	 * Kick off the submitted command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
				    link_target);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
|