/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_GEM_H__
#define __ETNAVIV_GEM_H__

#include <linux/reservation.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"

struct dma_fence;
struct etnaviv_gem_ops;
struct etnaviv_gem_object;
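
/*
 * Userspace memory region backing a userptr buffer object: the start
 * address, the mm it belongs to, and whether the pages may only be
 * mapped read-only.
 */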
struct etnaviv_gem_userptr {
	uintptr_t ptr;
	struct mm_struct *mm;
	bool ro;
};
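
/*
 * A mapping of a BO into a GPU address space. A BO carries one such
 * mapping per IOMMU context it is mapped into; 'use' counts the current
 * users of the mapping and 'iova' is the GPU virtual address once the
 * mapping has been set up.
 */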
struct etnaviv_vram_mapping {
	struct list_head obj_node;
	struct list_head scan_node;
	struct list_head mmu_node;
	struct etnaviv_gem_object *object;
	struct etnaviv_iommu_context *context;
	struct drm_mm_node vram_node;
	unsigned int use;
	u32 iova;
};
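
/*
 * Driver-private state for each GEM buffer object: the backend ops, the
 * backing pages/sg_table/kernel vaddr once instantiated, the list of
 * per-context VRAM mappings, and the GPU activity state used for
 * busy/idle tracking and CPU access synchronization.
 */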
struct etnaviv_gem_object {
	struct drm_gem_object base;
	const struct etnaviv_gem_ops *ops;
	struct mutex lock;

	u32 flags;

	struct list_head gem_node;
	struct etnaviv_gpu *gpu; /* non-null if active */
	atomic_t gpu_active;
	u32 access;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct list_head vram_list;

	/* cache maintenance */
	u32 last_cpu_prep_op;

	struct etnaviv_gem_userptr userptr;
};
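
/* Upcast from the embedded struct drm_gem_object to the etnaviv BO. */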
static inline
struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
{
	return container_of(obj, struct etnaviv_gem_object, base);
}
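
/*
 * Backend operations for a BO; etnaviv provides separate implementations
 * for shmem-backed, userptr and dma-buf imported objects.
 */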
struct etnaviv_gem_ops {
	int (*get_pages)(struct etnaviv_gem_object *);
	void (*release)(struct etnaviv_gem_object *);
	void *(*vmap)(struct etnaviv_gem_object *);
	int (*mmap)(struct etnaviv_gem_object *, struct vm_area_struct *);
};
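
/* True while the BO is queued to or being executed by a GPU. */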
static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
{
	return atomic_read(&etnaviv_obj->gpu_active) != 0;
}

#define MAX_CMDS 4
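
/*
 * Per-BO state of a submit: the userspace submit flags (read/write
 * direction), the object, its mapping in the GPU address space, and the
 * exclusive/shared fences picked up from the BO's reservation object
 * for implicit synchronization.
 */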
struct etnaviv_gem_submit_bo {
	u32 flags;
	struct etnaviv_gem_object *obj;
	struct etnaviv_vram_mapping *mapping;
	struct dma_fence *excl;
	unsigned int nr_shared;
	struct dma_fence **shared;
};

/* Created per submit ioctl, to track the buffer objects and command
 * stream associated with one submission, both for synchronization and
 * to make it easier to unwind when things go wrong.
 */
struct etnaviv_gem_submit {
	struct drm_sched_job sched_job;
	struct kref refcount;
	struct etnaviv_file_private *ctx;
	struct etnaviv_gpu *gpu;
	struct dma_fence *out_fence, *in_fence;
	int out_fence_id;
	struct list_head node; /* GPU active submit list */
	struct etnaviv_cmdbuf cmdbuf;
	bool runtime_resumed;
	u32 exec_state;
	u32 flags;
	unsigned int nr_pmrs;
	struct etnaviv_perfmon_request *pmrs;
	unsigned int nr_bos;
	struct etnaviv_gem_submit_bo bos[];
	/* No new members here, the previous one is variable-length! */
};
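
/*
 * Drop a reference on the submit; the submit and the references it still
 * holds are cleaned up once the last reference is gone.
 */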
void etnaviv_submit_put(struct etnaviv_gem_submit *submit);

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout);
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res);
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
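
/*
 * Look up or create a reference-counted mapping of the BO in the given
 * MMU context. A minimal usage sketch (assuming the usual ERR_PTR error
 * convention, not verbatim driver code):
 *
 *	mapping = etnaviv_gem_mapping_get(obj, gpu, mmu_context);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	... reference mapping->iova from the command stream ...
 *	etnaviv_gem_mapping_unreference(mapping);
 */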
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu,
	struct etnaviv_iommu_context *mmu_context);
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);

#endif /* __ETNAVIV_GEM_H__ */