linux_dsm_epyc7002/drivers/gpu/drm/etnaviv/etnaviv_drv.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015-2018 Etnaviv Project
*/

#ifndef __ETNAVIV_DRV_H__
#define __ETNAVIV_DRV_H__

#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/types.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/etnaviv_drm.h>
#include <drm/gpu_scheduler.h>

struct etnaviv_cmdbuf;
struct etnaviv_gpu;
struct etnaviv_mmu;
struct etnaviv_gem_object;
struct etnaviv_gem_submit;
struct etnaviv_iommu_global;

#define ETNAVIV_SOFTPIN_START_ADDRESS SZ_4M /* must be >= SUBALLOC_SIZE */
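
/*
 * Per-client (per open DRM file) state: the GPU address space (MMU
 * context) used by this client and one scheduler entity per GPU pipe.
 */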
struct etnaviv_file_private {
	struct etnaviv_iommu_context *mmu;
	struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
};
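
/*
 * Device-global driver state: the GPU cores bound to this DRM device,
 * DMA segment parameters, the command buffer suballocator, global MMU
 * state and the list of all GEM objects allocated against the device.
 */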
struct etnaviv_drm_private {
	int num_gpus;
	struct device_dma_parameters dma_parms;
	struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
	gfp_t shm_gfp_mask;

	struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
	struct etnaviv_iommu_global *mmu_global;

	/* list of GEM objects: */
	struct mutex gem_lock;
	struct list_head gem_list;
};

int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
	struct drm_file *file);
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
	struct vm_area_struct *vma);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
	struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
void *etnaviv_gem_vmap(struct drm_gem_object *obj);
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
	struct drm_etnaviv_timespec *timeout);
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle);
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle);

u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	struct etnaviv_iommu_context *mmu,
	unsigned int event, struct etnaviv_cmdbuf *cmdbuf);

void etnaviv_validate_init(void);
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
	u32 *stream, unsigned int size,
	struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size);

#ifdef CONFIG_DEBUG_FS
void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m);
#endif

#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
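/*
 * Both macros append a trailing newline to the format string. VERB keeps
 * its arguments type-checked by the compiler but is dead-code eliminated
 * via the if (0), so verbose call sites cost nothing at runtime.
 * Illustrative (hypothetical) call sites:
 *
 *	DBG("pipe %u: submitted fence %u", pipe, fence);
 *	VERB("reloc %u at offset %u", i, r->submit_offset);
 */
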
/*
 * Return the storage size of a structure with a variable length array.
 * The array has nelem elements of size elem_size and follows a base
 * structure of size base. If the total size overflows size_t, return
 * zero.
 */
static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
{
	if (elem_size && nelem > (SIZE_MAX - base) / elem_size)
		return 0;

	return base + nelem * elem_size;
}
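
/*
 * Usage sketch (hypothetical caller; nr_bos and submit are illustrative
 * names, not part of this header). A return of zero signals that
 * nelem * elem_size overflowed size_t and must be treated as an error:
 *
 *	sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
 *	if (!sz)
 *		return NULL;
 *	submit = kzalloc(sz, GFP_KERNEL);
 */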

/*
 * Etnaviv timeouts are specified as absolute CLOCK_MONOTONIC times, not
 * jiffies. Convert a timeout to a jiffies count by taking the difference
 * between the specified deadline and the current CLOCK_MONOTONIC time.
 */
static inline unsigned long etnaviv_timeout_to_jiffies(
	const struct drm_etnaviv_timespec *timeout)
{
	struct timespec64 ts, to = {
		.tv_sec = timeout->tv_sec,
		.tv_nsec = timeout->tv_nsec,
	};

	ktime_get_ts64(&ts);

	/* timeouts before "now" have already expired */
	if (timespec64_compare(&to, &ts) <= 0)
		return 0;

	ts = timespec64_sub(to, ts);

	return timespec64_to_jiffies(&ts);
}
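
/*
 * Usage sketch (illustrative only; fence_event and fence_completed() are
 * hypothetical names): bounding a wait on GPU completion by an absolute
 * CLOCK_MONOTONIC deadline supplied by userspace:
 *
 *	unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
 *
 *	ret = wait_event_interruptible_timeout(gpu->fence_event,
 *			fence_completed(gpu, fence), remaining);
 */
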
#endif /* __ETNAVIV_DRV_H__ */