Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-22 23:02:27 +07:00)
Commit 7282f7645d
Up until now, a single shared GPU address space was used. This is not ideal, as there is no protection between processes and it doesn't work for supporting the same GPU/CPU VA feature. Most importantly, this will hopefully mitigate Alyssa's fear of WebGL, whatever that is.

Most of the changes here are moving struct drm_mm and struct panfrost_mmu objects from the per-device struct to the per-FD struct. The critical function is panfrost_mmu_as_get(), which handles allocating and switching the h/w address spaces.

There are three states an AS can be in: free, allocated, and in use. When a job runs, it requests an address space and then marks it not in use when the job is complete (but it stays assigned). The first time through, we find a free AS in the alloc_mask and assign the AS to the FD. The next time through, we most likely already have our AS and we just mark it in use with a ref count. We need a ref count because we have multiple job slots. If the job/FD doesn't have an AS assigned and there are no free ones, then we pick an allocated one not in use from our LRU list and switch the AS from the old FD to the new one.

Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Steven Price <steven.price@arm.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190813150115.30338-1-robh@kernel.org
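To make the free/allocated/in-use flow concrete, here is a minimal userspace model of the allocate, reuse, and steal paths described above. It is not the driver's panfrost_mmu.c code: the names (struct ctx, as_get(), as_put(), owner[], NUM_AS) are invented for the sketch, and the real implementation uses a spinlock, an alloc_mask bitmap, and a list_head-based LRU rather than timestamps.

/* Minimal sketch of the AS state machine; illustrative names only. */
#include <stdio.h>

#define NUM_AS 8	/* number of h/w address spaces (8 on many Malis) */

struct ctx {				/* stands in for the per-FD context */
	int as;				/* assigned AS, or -1 while none */
	int refcount;			/* "in use": >0 while jobs reference the AS */
	unsigned long lru;		/* last-use stamp, stands in for the LRU list */
};

static struct ctx *owner[NUM_AS];	/* which ctx holds each AS; NULL = free */
static unsigned long stamp;

static int as_get(struct ctx *c)
{
	int i, victim = -1;
	unsigned long oldest;

	c->lru = ++stamp;

	/* Likely case: this FD already holds an AS, just mark it in use. */
	if (c->as >= 0) {
		c->refcount++;
		return c->as;
	}

	/* First time through: take a free AS if one exists. */
	for (i = 0; i < NUM_AS; i++) {
		if (!owner[i]) {
			victim = i;
			break;
		}
	}

	/* No free AS: steal the least-recently-used one that is idle. */
	if (victim < 0) {
		oldest = stamp + 1;
		for (i = 0; i < NUM_AS; i++) {
			if (owner[i]->refcount == 0 && owner[i]->lru < oldest) {
				oldest = owner[i]->lru;
				victim = i;
			}
		}
		if (victim < 0)
			return -1;	/* every AS in use; caller would wait */
		owner[victim]->as = -1;	/* switch it away from the old FD */
	}

	owner[victim] = c;
	c->as = victim;
	c->refcount = 1;
	return victim;
}

/* Job complete: the AS goes idle but stays assigned to the FD. */
static void as_put(struct ctx *c)
{
	c->refcount--;
}

int main(void)
{
	struct ctx a = { .as = -1 }, b = { .as = -1 };

	printf("a: AS %d (fresh)\n", as_get(&a));
	as_put(&a);				/* idle, but still assigned */
	printf("a: AS %d (reused)\n", as_get(&a));
	as_put(&a);
	printf("b: AS %d (fresh)\n", as_get(&b));
	as_put(&b);
	return 0;
}

Note the reuse path: after as_put(), ctx a still owns its AS, so the second as_get() only bumps the refcount instead of touching the hardware, which is exactly why reassignment is rare in steady state.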
52 lines · 1.3 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#ifndef __PANFROST_GEM_H__
#define __PANFROST_GEM_H__

#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>

struct panfrost_mmu;

struct panfrost_gem_object {
	struct drm_gem_shmem_object base;
	struct sg_table *sgts;

	struct panfrost_mmu *mmu;	/* per-FD MMU context this BO maps into */
	struct drm_mm_node node;	/* GPU VA range within that context */
	bool is_mapped		:1;	/* pages are mapped in the GPU MMU */
	bool noexec		:1;	/* mapping is non-executable */
	bool is_heap		:1;	/* grow-on-fault heap BO */
};

static inline
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
{
	return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}

static inline
struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
{
	return container_of(node, struct panfrost_gem_object, node);
}

struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);

struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt);

struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *dev, size_t size,
				u32 flags,
				uint32_t *handle);

void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);

#endif /* __PANFROST_GEM_H__ */
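Both inline helpers in this header are the kernel's container_of() idiom: given a pointer to a member embedded inside a larger struct, recover the containing struct by subtracting the member's offset. This only works because node is embedded by value in panfrost_gem_object, not held by pointer. A self-contained sketch of the same trick, with invented names and offsetof() used directly instead of the kernel macro:

#include <stdio.h>
#include <stddef.h>

struct node {
	int dummy;		/* placeholder; think struct drm_mm_node */
};

struct bo {
	int size;
	struct node node;	/* embedded by value, as in the header above */
};

/* Same pattern as drm_mm_node_to_panfrost_bo(): member pointer -> container. */
static struct bo *node_to_bo(struct node *n)
{
	return (struct bo *)((char *)n - offsetof(struct bo, node));
}

int main(void)
{
	struct bo b = { .size = 4096 };

	printf("size via node: %d\n", node_to_bo(&b.node)->size); /* 4096 */
	return 0;
}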