drm/radeon: add a way to get and set initial buffer domains v2
When passing buffers between processes, the receiving process needs to know
the original buffer domain, so that it doesn't accidentally move the buffer.

v2: reserve the buffer

Signed-off-by: Marek Olšák <marek.olsak@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 14a9579ddb
commit bda72d58a2
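Below is a minimal userspace sketch (not part of this patch) of how a process
that imports a shared buffer could query the domain it was originally
allocated in, so it can keep using that domain instead of migrating the
buffer. It assumes libdrm's drmCommandWriteRead() from <xf86drm.h> and the
DRM_RADEON_GEM_OP / struct drm_radeon_gem_op / RADEON_GEM_OP_* definitions
this patch adds to radeon_drm.h; the include path for that header may vary
between setups.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/radeon_drm.h>	/* path may differ depending on installed headers */

/* Ask the kernel which domain (RADEON_GEM_DOMAIN_*) the buffer was
 * originally created in. Illustrative sketch with minimal error handling. */
static int radeon_gem_get_initial_domain(int fd, uint32_t handle,
					  uint32_t *domain)
{
	struct drm_radeon_gem_op args;
	int r;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

	/* DRM_IOCTL_RADEON_GEM_OP is DRM_IOWR, so args.value is written back. */
	r = drmCommandWriteRead(fd, DRM_RADEON_GEM_OP, &args, sizeof(args));
	if (r == 0)
		*domain = (uint32_t)args.value;
	return r;
}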
@@ -456,6 +456,7 @@ struct radeon_bo {
 	/* Protected by gem.mutex */
 	struct list_head		list;
 	/* Protected by tbo.reserved */
+	u32				initial_domain;
 	u32				placements[3];
 	struct ttm_placement		placement;
 	struct ttm_buffer_object	tbo;
@@ -2116,6 +2117,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp);
 int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp);
+int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp);
@@ -79,9 +79,10 @@
  * 2.35.0 - Add CIK macrotile mode array query
  * 2.36.0 - Fix CIK DCE tiling setup
  * 2.37.0 - allow GS ring setup on r6xx/r7xx
+ * 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN)
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	37
+#define KMS_DRIVER_MINOR	38
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -533,6 +533,42 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 	return r;
 }
 
+int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp)
+{
+	struct drm_radeon_gem_op *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+	r = radeon_bo_reserve(robj, false);
+	if (unlikely(r))
+		goto out;
+
+	switch (args->op) {
+	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
+		args->value = robj->initial_domain;
+		break;
+	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
+		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
+						      RADEON_GEM_DOMAIN_GTT |
+						      RADEON_GEM_DOMAIN_CPU);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	radeon_bo_unreserve(robj);
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
 int radeon_mode_dumb_create(struct drm_file *file_priv,
 			    struct drm_device *dev,
 			    struct drm_mode_create_dumb *args)
@@ -814,5 +814,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };
 int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
@@ -145,6 +145,9 @@ int radeon_bo_create(struct radeon_device *rdev,
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
 	INIT_LIST_HEAD(&bo->va);
+	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
+	                               RADEON_GEM_DOMAIN_GTT |
+	                               RADEON_GEM_DOMAIN_CPU);
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	down_read(&rdev->pm.mclk_lock);
@@ -510,6 +510,7 @@ typedef struct {
 #define DRM_RADEON_GEM_GET_TILING	0x29
 #define DRM_RADEON_GEM_BUSY		0x2a
 #define DRM_RADEON_GEM_VA		0x2b
+#define DRM_RADEON_GEM_OP		0x2c
 
 #define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
 #define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -552,6 +553,7 @@ typedef struct {
 #define DRM_IOCTL_RADEON_GEM_GET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
 #define DRM_IOCTL_RADEON_GEM_BUSY	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
 #define DRM_IOCTL_RADEON_GEM_VA		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
+#define DRM_IOCTL_RADEON_GEM_OP		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
 
 typedef struct drm_radeon_init {
 	enum {
@@ -884,6 +886,16 @@ struct drm_radeon_gem_pwrite {
 	uint64_t data_ptr;
 };
 
+/* Sets or returns a value associated with a buffer. */
+struct drm_radeon_gem_op {
+	uint32_t	handle; /* buffer */
+	uint32_t	op;     /* RADEON_GEM_OP_* */
+	uint64_t	value;  /* input or return value */
+};
+
+#define RADEON_GEM_OP_GET_INITIAL_DOMAIN	0
+#define RADEON_GEM_OP_SET_INITIAL_DOMAIN	1
+
 #define RADEON_VA_MAP			1
 #define RADEON_VA_UNMAP			2
 
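For completeness, here is the mirror-image operation as a hedged sketch under
the same assumptions as the earlier snippet (libdrm's drmCommandWriteRead()
plus the uapi additions from this patch): userspace can record or override a
buffer's initial domain with RADEON_GEM_OP_SET_INITIAL_DOMAIN, and the kernel
masks the supplied value to VRAM | GTT | CPU, as the radeon_gem.c hunk shows.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/radeon_drm.h>	/* path may differ depending on installed headers */

/* Illustrative only: record the domain this buffer should be considered to
 * live in, so later GET_INITIAL_DOMAIN queries return the same value. */
static int radeon_gem_set_initial_domain(int fd, uint32_t handle,
					  uint32_t domain)
{
	struct drm_radeon_gem_op args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.op = RADEON_GEM_OP_SET_INITIAL_DOMAIN;
	args.value = domain;	/* e.g. RADEON_GEM_DOMAIN_VRAM */

	return drmCommandWriteRead(fd, DRM_RADEON_GEM_OP, &args, sizeof(args));
}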