// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
#include "uapi/drm/v3d_drm.h"

#define GMP_GRANULARITY (128 * 1024)

/* Enum for each of the V3D queues. */
enum v3d_queue {
	V3D_BIN,
	V3D_RENDER,
	V3D_TFU,
};

#define V3D_MAX_QUEUES (V3D_TFU + 1)

struct v3d_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct v3d_dev {
	struct drm_device drm;

	/* Short representation (e.g. 33, 41) of the V3D tech version
	 * and revision.
	 */
	int ver;
	bool single_irq_line;

	struct device *dev;
	struct platform_device *pdev;
	void __iomem *hub_regs;
	void __iomem *core_regs[3];
	void __iomem *bridge_regs;
	void __iomem *gca_regs;
	struct clk *clk;
	struct reset_control *reset;

	/* Virtual and DMA addresses of the single shared page table. */
	volatile u32 *pt;
	dma_addr_t pt_paddr;

	/* Virtual and DMA addresses of the MMU's scratch page. When
	 * a read or write is invalid in the MMU, it will be
	 * redirected here.
	 */
	void *mmu_scratch;
	dma_addr_t mmu_scratch_paddr;

	/* Number of V3D cores. */
	u32 cores;

	/* Allocator managing the address space. All units are in
	 * number of pages.
	 */
	struct drm_mm mm;
	spinlock_t mm_lock;

	struct work_struct overflow_mem_work;

	struct v3d_exec_info *bin_job;
	struct v3d_exec_info *render_job;
	struct v3d_tfu_job *tfu_job;

	struct v3d_queue_state queue[V3D_MAX_QUEUES];

	/* Spinlock used to synchronize the overflow memory
	 * management against bin job submission.
	 */
	spinlock_t job_lock;

	/* Protects bo_stats */
	struct mutex bo_lock;

	/* Lock taken when resetting the GPU, to keep multiple
	 * processes from trying to park the scheduler threads and
	 * reset at once.
	 */
	struct mutex reset_lock;

	/* Lock taken when creating and pushing the GPU scheduler
	 * jobs, to keep the sched-fence seqnos in order.
	 */
	struct mutex sched_lock;

	struct {
		u32 num_allocated;
		u32 pages_allocated;
	} bo_stats;
};
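/* Illustrative sketch (assumption, not part of this header): how the drm_mm
 * allocator above is typically used when binding a BO into the GPU address
 * space. The real flags and error handling live in v3d_bo.c/v3d_mmu.c; the
 * "bo" and "obj" names here are hypothetical.
 *
 *	spin_lock(&v3d->mm_lock);
 *	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
 *					 obj->size >> PAGE_SHIFT,
 *					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
 *	spin_unlock(&v3d->mm_lock);
 *	if (!ret)
 *		v3d_mmu_insert_ptes(bo);
 */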
static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
	return (struct v3d_dev *)dev->dev_private;
}

/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
	struct v3d_dev *v3d;

	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
};
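/* Illustrative sketch (assumption, not part of this header): the submit paths
 * in v3d_gem.c push jobs to the per-fd entity of the target queue while
 * holding v3d->sched_lock, so the sched-fence seqnos stay in order. Roughly:
 *
 *	mutex_lock(&v3d->sched_lock);
 *	ret = drm_sched_job_init(&job->base,
 *				 &v3d_priv->sched_entity[V3D_RENDER], v3d_priv);
 *	...
 *	drm_sched_entity_push_job(&job->base,
 *				  &v3d_priv->sched_entity[V3D_RENDER]);
 *	mutex_unlock(&v3d->sched_lock);
 */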
struct v3d_bo {
	struct drm_gem_shmem_object base;

	struct drm_mm_node node;

	/* List entry for the BO's position in
	 * v3d_exec_info->unref_list
	 */
	struct list_head unref_head;
};

static inline struct v3d_bo *
to_v3d_bo(struct drm_gem_object *bo)
{
	return (struct v3d_bo *)bo;
}

struct v3d_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* v3d seqno for signaled() test */
	u64 seqno;
	enum v3d_queue queue;
};
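/* Illustrative sketch (assumption, not part of this header): v3d_fence.c
 * creates one of these per job and numbers it from the per-queue state
 * declared above, roughly:
 *
 *	fence->seqno = ++v3d->queue[queue].emit_seqno;
 *	dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock,
 *		       v3d->queue[queue].fence_context, fence->seqno);
 */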
static inline struct v3d_fence *
to_v3d_fence(struct dma_fence *fence)
{
	return (struct v3d_fence *)fence;
}

#define V3D_READ(offset) readl(v3d->hub_regs + offset)
#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)

#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)

#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)

#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
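/* Illustrative sketch (assumption): these accessors expect a "v3d" pointer in
 * scope and take a register offset from v3d_regs.h, e.g.:
 *
 *	u32 ident = V3D_CORE_READ(0, V3D_CTL_IDENT0);
 *	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, val);
 *
 * The register names are for illustration only.
 */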
struct v3d_job {
	struct drm_sched_job base;

	struct v3d_exec_info *exec;

	/* An optional fence userspace can pass in for the job to depend on. */
	struct dma_fence *in_fence;

	/* v3d fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *irq_fence;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;
};
struct v3d_exec_info {
	struct v3d_dev *v3d;

	struct v3d_job bin, render;

	/* Fence for when the scheduler considers the binner to be
	 * done, for render to depend on.
	 */
	struct dma_fence *bin_done_fence;

	/* Fence for when the scheduler considers the render to be
	 * done, for when the BOs reservations should be complete.
	 */
	struct dma_fence *render_done_fence;

	struct kref refcount;

	/* This is the array of BOs that were looked up at the start of exec. */
	struct v3d_bo **bo;
	u32 bo_count;

	/* List of overflow BOs used in the job that need to be
	 * released once the job is complete.
	 */
	struct list_head unref_list;

	/* Submitted tile memory allocation start/size, tile state. */
	u32 qma, qms, qts;
};
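/* Illustrative sketch (assumption, not part of this header): after
 * drm_sched_job_init(), the submit path grabs the scheduler's "finished"
 * fences so render can depend on bin and BO reservations can wait on render:
 *
 *	exec->bin_done_fence =
 *		dma_fence_get(&exec->bin.base.s_fence->finished);
 *	exec->render_done_fence =
 *		dma_fence_get(&exec->render.base.s_fence->finished);
 */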
struct v3d_tfu_job {
	struct drm_sched_job base;

	struct drm_v3d_submit_tfu args;

	/* An optional fence userspace can pass in for the job to depend on. */
	struct dma_fence *in_fence;

	/* v3d fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *irq_fence;

	struct v3d_dev *v3d;

	struct kref refcount;

	/* This is the array of BOs that were looked up at the start of exec. */
	struct v3d_bo *bo[4];
};
/**
 * wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
	int ret__ = 0; \
	while (!(COND)) { \
		if (time_after(jiffies, timeout__)) { \
			if (!(COND)) \
				ret__ = -ETIMEDOUT; \
			break; \
		} \
		msleep(1); \
	} \
	ret__; \
})
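/* Illustrative sketch (assumption): callers poll a register until a condition
 * holds, with a bounded timeout, and get 0 or -ETIMEDOUT back:
 *
 *	if (wait_for(!(V3D_READ(SOME_STATUS_REG) & SOME_BUSY_BIT), 100))
 *		return -ETIMEDOUT;
 *
 * SOME_STATUS_REG and SOME_BUSY_BIT are placeholders, not real registers.
 */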
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
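/* Illustrative sketch (assumption): used to clamp a user-supplied nanosecond
 * timeout (e.g. drm_v3d_wait_bo.timeout_ns) before handing it to a
 * jiffies-based wait:
 *
 *	unsigned long timeout_jiffies =
 *		nsecs_to_jiffies_timeout(args->timeout_ns);
 */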
/* v3d_bo.c */
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t size);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);

/* v3d_debugfs.c */
int v3d_debugfs_init(struct drm_minor *minor);

/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);

/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void v3d_exec_put(struct v3d_exec_info *exec);
void v3d_tfu_job_put(struct v3d_tfu_job *exec);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);

/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);

/* v3d_mmu.c */
int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
		       u32 *offset);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);

/* v3d_sched.c */
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);